-rw-r--r--  src/mongo/SConscript | 2
-rw-r--r--  src/mongo/client/parallel.cpp | 6
-rw-r--r--  src/mongo/client/remote_command_runner_impl.cpp | 2
-rw-r--r--  src/mongo/db/catalog/coll_mod.cpp | 2
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 36
-rw-r--r--  src/mongo/db/commands/geo_near_cmd.cpp | 18
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp | 10
-rw-r--r--  src/mongo/db/commands/index_filter_commands_test.cpp | 30
-rw-r--r--  src/mongo/db/commands/mr.cpp | 14
-rw-r--r--  src/mongo/db/commands/plan_cache_commands.cpp | 18
-rw-r--r--  src/mongo/db/commands/plan_cache_commands_test.cpp | 64
-rw-r--r--  src/mongo/db/dbcommands.cpp | 14
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 6
-rw-r--r--  src/mongo/db/exec/cached_plan.cpp | 4
-rw-r--r--  src/mongo/db/exec/idhack.cpp | 8
-rw-r--r--  src/mongo/db/exec/multi_plan.cpp | 9
-rw-r--r--  src/mongo/db/exec/projection_exec.cpp | 14
-rw-r--r--  src/mongo/db/exec/sort.cpp | 1
-rw-r--r--  src/mongo/db/exec/sort_key_generator.cpp | 12
-rw-r--r--  src/mongo/db/exec/subplan.cpp | 14
-rw-r--r--  src/mongo/db/ops/parsed_delete.cpp | 16
-rw-r--r--  src/mongo/db/ops/parsed_update.cpp | 16
-rw-r--r--  src/mongo/db/ops/update_driver.cpp | 6
-rw-r--r--  src/mongo/db/pipeline/pipeline.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 12
-rw-r--r--  src/mongo/db/query/SConscript | 14
-rw-r--r--  src/mongo/db/query/canonical_query.cpp | 108
-rw-r--r--  src/mongo/db/query/canonical_query.h | 26
-rw-r--r--  src/mongo/db/query/canonical_query_test.cpp | 208
-rw-r--r--  src/mongo/db/query/find.cpp | 46
-rw-r--r--  src/mongo/db/query/find_common.cpp | 12
-rw-r--r--  src/mongo/db/query/find_common.h | 6
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 57
-rw-r--r--  src/mongo/db/query/get_executor_test.cpp | 10
-rw-r--r--  src/mongo/db/query/getmore_request.cpp | 2
-rw-r--r--  src/mongo/db/query/lite_parsed_query_test.cpp | 1097
-rw-r--r--  src/mongo/db/query/parsed_projection.cpp | 26
-rw-r--r--  src/mongo/db/query/plan_cache.cpp | 32
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 96
-rw-r--r--  src/mongo/db/query/plan_executor.cpp | 2
-rw-r--r--  src/mongo/db/query/planner_access.cpp | 29
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp | 42
-rw-r--r--  src/mongo/db/query/query_planner.cpp | 30
-rw-r--r--  src/mongo/db/query/query_planner_test.cpp | 20
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp | 54
-rw-r--r--  src/mongo/db/query/query_request.cpp (renamed from src/mongo/db/query/lite_parsed_query.cpp) | 155
-rw-r--r--  src/mongo/db/query/query_request.h (renamed from src/mongo/db/query/lite_parsed_query.h) | 23
-rw-r--r--  src/mongo/db/query/query_request_test.cpp | 1097
-rw-r--r--  src/mongo/db/query/query_settings.cpp | 8
-rw-r--r--  src/mongo/db/ttl.cpp | 6
-rw-r--r--  src/mongo/dbtests/documentsourcetests.cpp | 6
-rw-r--r--  src/mongo/dbtests/executor_registry.cpp | 4
-rw-r--r--  src/mongo/dbtests/oplogstarttests.cpp | 6
-rw-r--r--  src/mongo/dbtests/plan_ranking.cpp | 86
-rw-r--r--  src/mongo/dbtests/query_plan_executor.cpp | 10
-rw-r--r--  src/mongo/dbtests/query_stage_cached_plan.cpp | 12
-rw-r--r--  src/mongo/dbtests/query_stage_delete.cpp | 6
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp | 26
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp | 43
-rw-r--r--  src/mongo/dbtests/query_stage_update.cpp | 6
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 1
-rw-r--r--  src/mongo/executor/SConscript | 2
-rw-r--r--  src/mongo/executor/downconvert_find_and_getmore_commands.cpp | 44
-rw-r--r--  src/mongo/executor/network_interface_asio_operation.cpp | 4
-rw-r--r--  src/mongo/s/SConscript | 2
-rw-r--r--  src/mongo/s/balancer/balancer.cpp | 4
-rw-r--r--  src/mongo/s/balancer/balancer_configuration_test.cpp | 5
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp | 6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp | 1
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp | 4
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp | 6
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp | 102
-rw-r--r--  src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp | 8
-rw-r--r--  src/mongo/s/chunk_manager.cpp | 6
-rw-r--r--  src/mongo/s/chunk_manager_targeter_test.cpp | 6
-rw-r--r--  src/mongo/s/client/shard_remote.cpp | 25
-rw-r--r--  src/mongo/s/commands/cluster_count_cmd.cpp | 2
-rw-r--r--  src/mongo/s/commands/cluster_explain.cpp | 1
-rw-r--r--  src/mongo/s/commands/cluster_explain_cmd.cpp | 1
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.cpp | 18
-rw-r--r--  src/mongo/s/commands/cluster_map_reduce_cmd.cpp | 4
-rw-r--r--  src/mongo/s/commands/cluster_pipeline_cmd.cpp | 8
-rw-r--r--  src/mongo/s/commands/commands_public.cpp | 1
-rw-r--r--  src/mongo/s/commands/strategy.cpp | 22
-rw-r--r--  src/mongo/s/commands/strategy.h | 4
-rw-r--r--  src/mongo/s/query/SConscript | 2
-rw-r--r--  src/mongo/s/query/async_results_merger_test.cpp | 20
-rw-r--r--  src/mongo/s/query/cluster_find.cpp | 91
-rw-r--r--  src/mongo/s/set_shard_version_request.cpp | 4
-rw-r--r--  src/mongo/s/shard_key_pattern.cpp | 6
-rw-r--r--  src/mongo/s/sharding_test_fixture.cpp | 4
-rw-r--r--  src/mongo/shell/bench.cpp | 50
92 files changed, 2102 insertions, 2109 deletions
diff --git a/src/mongo/SConscript b/src/mongo/SConscript
index cf87b163ea8..e2308a791a6 100644
--- a/src/mongo/SConscript
+++ b/src/mongo/SConscript
@@ -359,7 +359,7 @@ if not has_option('noshell') and usemozjs:
'db/catalog/index_key_validate',
'db/index/external_key_generator',
'db/query/command_request_response',
- 'db/query/lite_parsed_query',
+ 'db/query/query_request',
'db/server_options_core',
'linenoise_utf8',
'rpc/protocol',
diff --git a/src/mongo/client/parallel.cpp b/src/mongo/client/parallel.cpp
index f3380ff86a9..4a1f9d010a1 100644
--- a/src/mongo/client/parallel.cpp
+++ b/src/mongo/client/parallel.cpp
@@ -40,7 +40,7 @@
#include "mongo/client/dbclientcursor.h"
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/bson/dotted_path_support.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/s/catalog/catalog_cache.h"
#include "mongo/s/chunk_manager.h"
#include "mongo/s/client/shard_registry.h"
@@ -172,7 +172,7 @@ void ParallelSortClusteredCursor::_finishCons() {
BSONObjIterator sortKeyIt(_sortKey);
while (sortKeyIt.more()) {
BSONElement e = sortKeyIt.next();
- if (LiteParsedQuery::isTextScoreMeta(e)) {
+ if (QueryRequest::isTextScoreMeta(e)) {
textMetaSortKeyFields.insert(e.fieldName());
transformedSortKeyBuilder.append(e.fieldName(), -1);
} else {
@@ -196,7 +196,7 @@ void ParallelSortClusteredCursor::_finishCons() {
string fieldName = e.fieldName();
- if (LiteParsedQuery::isTextScoreMeta(e)) {
+ if (QueryRequest::isTextScoreMeta(e)) {
textMetaSortKeyFields.erase(fieldName);
} else {
// exact field
diff --git a/src/mongo/client/remote_command_runner_impl.cpp b/src/mongo/client/remote_command_runner_impl.cpp
index 353d133c58c..4c6703dc8b0 100644
--- a/src/mongo/client/remote_command_runner_impl.cpp
+++ b/src/mongo/client/remote_command_runner_impl.cpp
@@ -173,7 +173,7 @@ StatusWith<RemoteCommandResponse> RemoteCommandRunnerImpl::runCommand(
// 'commandName' will be an empty string if the command object is an empty BSON
// document.
StringData commandName = request.cmdObj.firstElement().fieldNameStringData();
- const auto isFindCmd = commandName == LiteParsedQuery::kFindCommandName;
+ const auto isFindCmd = commandName == QueryRequest::kFindCommandName;
const auto isGetMoreCmd = commandName == GetMoreRequest::kGetMoreCommandName;
const auto isFindOrGetMoreCmd = isFindCmd || isGetMoreCmd;
diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp
index b73b732b723..dde29faa503 100644
--- a/src/mongo/db/catalog/coll_mod.cpp
+++ b/src/mongo/db/catalog/coll_mod.cpp
@@ -80,7 +80,7 @@ Status collMod(OperationContext* txn,
// no-op
} else if (str::startsWith(e.fieldName(), "$")) {
// no-op ignore top-level fields prefixed with $. They are for the command processor
- } else if (LiteParsedQuery::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
+ } else if (QueryRequest::cmdOptionMaxTimeMS == e.fieldNameStringData()) {
// no-op
} else if (str::equals("index", e.fieldName())) {
BSONObj indexObj = e.Obj();
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index 5573e8fc819..15b512b5d71 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -136,18 +136,18 @@ public:
str::stream() << "Invalid collection name: " << nss.ns()};
}
- // Parse the command BSON to a LiteParsedQuery.
+ // Parse the command BSON to a QueryRequest.
const bool isExplain = true;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
+ auto qrStatus = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!qrStatus.isOK()) {
+ return qrStatus.getStatus();
}
- // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
+ // Finish the parsing step by using the QueryRequest to create a CanonicalQuery.
ExtensionsCallbackReal extensionsCallback(txn, &nss);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(lpqStatus.getValue()), extensionsCallback);
+ CanonicalQuery::canonicalize(txn, std::move(qrStatus.getValue()), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -202,17 +202,17 @@ public:
Status(ErrorCodes::IllegalOperation, "Cannot run find command from eval()"));
}
- // Parse the command BSON to a LiteParsedQuery.
+ // Parse the command BSON to a QueryRequest.
const bool isExplain = false;
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpqStatus.isOK()) {
- return appendCommandStatus(result, lpqStatus.getStatus());
+ auto qrStatus = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!qrStatus.isOK()) {
+ return appendCommandStatus(result, qrStatus.getStatus());
}
- auto& lpq = lpqStatus.getValue();
+ auto& qr = qrStatus.getValue();
// Validate term before acquiring locks, if provided.
- if (auto term = lpq->getReplicationTerm()) {
+ if (auto term = qr->getReplicationTerm()) {
auto replCoord = repl::ReplicationCoordinator::get(txn);
Status status = replCoord->updateTerm(txn, *term);
// Note: updateTerm returns ok if term stayed the same.
@@ -230,9 +230,9 @@ public:
const int ntoskip = -1;
beginQueryOp(txn, nss, cmdObj, ntoreturn, ntoskip);
- // Finish the parsing step by using the LiteParsedQuery to create a CanonicalQuery.
+ // Finish the parsing step by using the QueryRequest to create a CanonicalQuery.
ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return appendCommandStatus(result, statusWithCQ.getStatus());
}
@@ -266,14 +266,14 @@ public:
return true;
}
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
+ const QueryRequest& originalQR = exec->getCanonicalQuery()->getQueryRequest();
// Stream query results, adding them to a BSONArray as we go.
CursorResponseBuilder firstBatch(/*isInitialResponse*/ true, &result);
BSONObj obj;
PlanExecutor::ExecState state = PlanExecutor::ADVANCED;
long long numResults = 0;
- while (!FindCommon::enoughForFirstBatch(pq, numResults) &&
+ while (!FindCommon::enoughForFirstBatch(originalQR, numResults) &&
PlanExecutor::ADVANCED == (state = exec->getNext(&obj, NULL))) {
// If we can't fit this result inside the current batch, then we stash it for later.
if (!FindCommon::haveSpaceForNext(obj, numResults, firstBatch.bytesUsed())) {
@@ -320,8 +320,8 @@ public:
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
- pq.getOptions(),
- pq.getFilter());
+ originalQR.getOptions(),
+ originalQR.getFilter());
cursorId = cursor->cursorid();
invariant(!exec);
diff --git a/src/mongo/db/commands/geo_near_cmd.cpp b/src/mongo/db/commands/geo_near_cmd.cpp
index 5de86582d63..1fb9cd3cdeb 100644
--- a/src/mongo/db/commands/geo_near_cmd.cpp
+++ b/src/mongo/db/commands/geo_near_cmd.cpp
@@ -202,16 +202,16 @@ public:
uassert(17297, "distanceMultiplier must be non-negative", distanceMultiplier >= 0);
}
- BSONObj projObj = BSON("$pt" << BSON("$meta" << LiteParsedQuery::metaGeoNearPoint) << "$dis"
- << BSON("$meta" << LiteParsedQuery::metaGeoNearDistance));
-
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(rewritten);
- lpq->setProj(projObj);
- lpq->setLimit(numWanted);
- lpq->setCollation(collation);
+ BSONObj projObj = BSON("$pt" << BSON("$meta" << QueryRequest::metaGeoNearPoint) << "$dis"
+ << BSON("$meta" << QueryRequest::metaGeoNearDistance));
+
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(rewritten);
+ qr->setProj(projObj);
+ qr->setLimit(numWanted);
+ qr->setCollation(collation);
const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
errmsg = "Can't parse filter / create query";
return false;
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index ecf27a3f1d8..627a55cead2 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -322,11 +322,11 @@ Status ClearFilters::clear(OperationContext* txn,
invariant(entry);
// Create canonical query.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(entry->query);
- lpq->setSort(entry->sort);
- lpq->setProj(entry->projection);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(entry->query);
+ qr->setSort(entry->sort);
+ qr->setProj(entry->projection);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
invariantOK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp
index 765871ac134..72c40a07083 100644
--- a/src/mongo/db/commands/index_filter_commands_test.cpp
+++ b/src/mongo/db/commands/index_filter_commands_test.cpp
@@ -119,12 +119,12 @@ void addQueryShapeToPlanCache(OperationContext* txn,
const char* sortStr,
const char* projectionStr) {
// Create canonical query.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projectionStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projectionStr));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -147,12 +147,12 @@ bool planCacheContains(const PlanCache& planCache,
auto txn = serviceContext.makeOperationContext();
// Create canonical query.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projectionStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projectionStr));
auto statusWithInputQuery = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithInputQuery.getStatus());
unique_ptr<CanonicalQuery> inputQuery = std::move(statusWithInputQuery.getValue());
@@ -167,12 +167,12 @@ bool planCacheContains(const PlanCache& planCache,
// Canonicalizing query shape in cache entry to get cache key.
// Alternatively, we could add key to PlanCacheEntry but that would be used in one place
// only.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(entry->query);
- lpq->setSort(entry->sort);
- lpq->setProj(entry->projection);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(entry->query);
+ qr->setSort(entry->sort);
+ qr->setProj(entry->projection);
auto statusWithCurrentQuery = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCurrentQuery.getStatus());
unique_ptr<CanonicalQuery> currentQuery = std::move(statusWithCurrentQuery.getValue());
diff --git a/src/mongo/db/commands/mr.cpp b/src/mongo/db/commands/mr.cpp
index 80d17229d5f..b86382a7903 100644
--- a/src/mongo/db/commands/mr.cpp
+++ b/src/mongo/db/commands/mr.cpp
@@ -1070,10 +1070,10 @@ void State::finalReduce(OperationContext* txn, CurOp* curOp, ProgressMeterHolder
const NamespaceString nss(_config.incLong);
const ExtensionsCallbackReal extensionsCallback(_txn, &nss);
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setSort(sortKey);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setSort(sortKey);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
verify(statusWithCQ.isOK());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -1444,14 +1444,14 @@ public:
unique_ptr<ScopedTransaction> scopedXact(new ScopedTransaction(txn, MODE_IS));
unique_ptr<AutoGetDb> scopedAutoDb(new AutoGetDb(txn, nss.db(), MODE_S));
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(config.filter);
- lpq->setSort(config.sort);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(config.filter);
+ qr->setSort(config.sort);
const ExtensionsCallbackReal extensionsCallback(txn, &nss);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
uasserted(17238, "Can't canonicalize query " + config.filter.toString());
return 0;
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index 0d4d11793f2..7fda9ad233f 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -209,12 +209,12 @@ StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationC
// Create canonical query
const NamespaceString nss(ns);
- auto lpq = stdx::make_unique<LiteParsedQuery>(std::move(nss));
- lpq->setFilter(queryObj);
- lpq->setSort(sortObj);
- lpq->setProj(projObj);
+ auto qr = stdx::make_unique<QueryRequest>(std::move(nss));
+ qr->setFilter(queryObj);
+ qr->setSort(sortObj);
+ qr->setProj(projObj);
const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -314,8 +314,8 @@ Status PlanCacheClear::clear(OperationContext* txn,
if (!planCache->contains(*cq)) {
// Log if asked to clear non-existent query shape.
LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
- << cq->getQueryObj().toString() << "(sort: " << cq->getParsed().getSort()
- << "; projection: " << cq->getParsed().getProj() << ")";
+ << cq->getQueryObj().toString() << "(sort: " << cq->getQueryRequest().getSort()
+ << "; projection: " << cq->getQueryRequest().getProj() << ")";
return Status::OK();
}
@@ -325,8 +325,8 @@ Status PlanCacheClear::clear(OperationContext* txn,
}
LOG(1) << ns << ": removed plan cache entry - " << cq->getQueryObj().toString()
- << "(sort: " << cq->getParsed().getSort()
- << "; projection: " << cq->getParsed().getProj() << ")";
+ << "(sort: " << cq->getQueryRequest().getSort()
+ << "; projection: " << cq->getQueryRequest().getProj() << ")";
return Status::OK();
}
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index bb8e2f0ad7e..8730290d78a 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -130,10 +130,10 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
auto txn = serviceContext.makeOperationContext();
// Create a canonical query
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: 1}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -148,8 +148,8 @@ TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
vector<BSONObj> shapes = getShapes(planCache);
ASSERT_EQUALS(shapes.size(), 1U);
ASSERT_EQUALS(shapes[0].getObjectField("query"), cq->getQueryObj());
- ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getParsed().getSort());
- ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getParsed().getProj());
+ ASSERT_EQUALS(shapes[0].getObjectField("sort"), cq->getQueryRequest().getSort());
+ ASSERT_EQUALS(shapes[0].getObjectField("projection"), cq->getQueryRequest().getProj());
}
/**
@@ -161,10 +161,10 @@ TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
auto txn = serviceContext.makeOperationContext();
// Create a canonical query
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: 1}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -283,16 +283,16 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
auto txn = serviceContext.makeOperationContext();
// Create 2 canonical queries.
- auto lpqA = stdx::make_unique<LiteParsedQuery>(nss);
- lpqA->setFilter(fromjson("{a: 1}"));
+ auto qrA = stdx::make_unique<QueryRequest>(nss);
+ qrA->setFilter(fromjson("{a: 1}"));
auto statusWithCQA = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpqA), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qrA), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQA.getStatus());
- auto lpqB = stdx::make_unique<LiteParsedQuery>(nss);
- lpqB->setFilter(fromjson("{b: 1}"));
+ auto qrB = stdx::make_unique<QueryRequest>(nss);
+ qrB->setFilter(fromjson("{b: 1}"));
unique_ptr<CanonicalQuery> cqA = std::move(statusWithCQA.getValue());
auto statusWithCQB = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpqB), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qrB), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQB.getStatus());
unique_ptr<CanonicalQuery> cqB = std::move(statusWithCQB.getValue());
@@ -308,12 +308,12 @@ TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
// Check keys in cache before dropping {b: 1}
vector<BSONObj> shapesBefore = getShapes(planCache);
ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA =
- BSON("query" << cqA->getQueryObj() << "sort" << cqA->getParsed().getSort() << "projection"
- << cqA->getParsed().getProj());
- BSONObj shapeB =
- BSON("query" << cqB->getQueryObj() << "sort" << cqB->getParsed().getSort() << "projection"
- << cqB->getParsed().getProj());
+ BSONObj shapeA = BSON(
+ "query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort() << "projection"
+ << cqA->getQueryRequest().getProj());
+ BSONObj shapeB = BSON(
+ "query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort() << "projection"
+ << cqB->getQueryRequest().getProj());
ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeA) != shapesBefore.end());
ASSERT_TRUE(std::find(shapesBefore.begin(), shapesBefore.end(), shapeB) != shapesBefore.end());
@@ -415,10 +415,10 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
auto txn = serviceContext.makeOperationContext();
// Create a canonical query
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: 1}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -430,8 +430,10 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
solns.push_back(&qs);
planCache.add(*cq, solns, createDecision(1U));
- vector<BSONObj> plans = getPlans(
- planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj());
+ vector<BSONObj> plans = getPlans(planCache,
+ cq->getQueryObj(),
+ cq->getQueryRequest().getSort(),
+ cq->getQueryRequest().getProj());
ASSERT_EQUALS(plans.size(), 1U);
}
@@ -440,10 +442,10 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
auto txn = serviceContext.makeOperationContext();
// Create a canonical query
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: 1}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -457,8 +459,10 @@ TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
solns.push_back(&qs);
planCache.add(*cq, solns, createDecision(2U));
- vector<BSONObj> plans = getPlans(
- planCache, cq->getQueryObj(), cq->getParsed().getSort(), cq->getParsed().getProj());
+ vector<BSONObj> plans = getPlans(planCache,
+ cq->getQueryObj(),
+ cq->getQueryRequest().getSort(),
+ cq->getQueryRequest().getProj());
ASSERT_EQUALS(plans.size(), 2U);
}
diff --git a/src/mongo/db/dbcommands.cpp b/src/mongo/db/dbcommands.cpp
index 4b5df659bd7..2b00cab6ea3 100644
--- a/src/mongo/db/dbcommands.cpp
+++ b/src/mongo/db/dbcommands.cpp
@@ -631,12 +631,12 @@ public:
BSONObj sort = BSON("files_id" << 1 << "n" << 1);
MONGO_WRITE_CONFLICT_RETRY_LOOP_BEGIN {
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString(ns));
- lpq->setFilter(query);
- lpq->setSort(sort);
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString(ns));
+ qr->setFilter(query);
+ qr->setSort(sort);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
if (!statusWithCQ.isOK()) {
uasserted(17240, "Can't canonicalize query " + query.toString());
return 0;
@@ -1219,10 +1219,10 @@ const std::size_t kQueryOptionMaxTimeMSField = 3;
// We make an array of the fields we need so we can call getFields once. This saves repeated
// scans over the command object.
-const std::array<StringData, 4> neededFieldNames{LiteParsedQuery::cmdOptionMaxTimeMS,
+const std::array<StringData, 4> neededFieldNames{QueryRequest::cmdOptionMaxTimeMS,
Command::kHelpFieldName,
ChunkVersion::kShardVersionField,
- LiteParsedQuery::queryOptionMaxTimeMS};
+ QueryRequest::queryOptionMaxTimeMS};
} // namespace
void appendOpTimeMetadata(OperationContext* txn,
@@ -1348,7 +1348,7 @@ void Command::execCommand(OperationContext* txn,
// Handle command option maxTimeMS.
int maxTimeMS = uassertStatusOK(
- LiteParsedQuery::parseMaxTimeMS(extractedFields[kCmdOptionMaxTimeMSField]));
+ QueryRequest::parseMaxTimeMS(extractedFields[kCmdOptionMaxTimeMSField]));
uassert(ErrorCodes::InvalidOptions,
"no such command option $maxTimeMs; use maxTimeMS instead",
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index dc88905ae63..a3094773e03 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -134,10 +134,10 @@ RecordId Helpers::findOne(OperationContext* txn,
const ExtensionsCallbackReal extensionsCallback(txn, &collection->ns());
- auto lpq = stdx::make_unique<LiteParsedQuery>(collection->ns());
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(collection->ns());
+ qr->setFilter(query);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
massert(17244, "Could not canonicalize " + query.toString(), statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp
index 5ec0329f8b1..60ad6ed6fd3 100644
--- a/src/mongo/db/exec/cached_plan.cpp
+++ b/src/mongo/db/exec/cached_plan.cpp
@@ -336,8 +336,8 @@ void CachedPlanStage::updatePlanCache() {
LOG(5) << _canonicalQuery->ns()
<< ": Failed to update cache with feedback: " << fbs.toString() << " - "
<< "(query: " << _canonicalQuery->getQueryObj()
- << "; sort: " << _canonicalQuery->getParsed().getSort()
- << "; projection: " << _canonicalQuery->getParsed().getProj()
+ << "; sort: " << _canonicalQuery->getQueryRequest().getSort()
+ << "; projection: " << _canonicalQuery->getQueryRequest().getProj()
<< ") is no longer in plan cache.";
}
}
diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp
index 4dda2e38ed0..b1113be2e9e 100644
--- a/src/mongo/db/exec/idhack.cpp
+++ b/src/mongo/db/exec/idhack.cpp
@@ -227,10 +227,10 @@ void IDHackStage::doInvalidate(OperationContext* txn, const RecordId& dl, Invali
// static
bool IDHackStage::supportsQuery(const CanonicalQuery& query) {
- return !query.getParsed().showRecordId() && query.getParsed().getHint().isEmpty() &&
- query.getParsed().getCollation().isEmpty() && !query.getParsed().getSkip() &&
- CanonicalQuery::isSimpleIdQuery(query.getParsed().getFilter()) &&
- !query.getParsed().isTailable();
+ return !query.getQueryRequest().showRecordId() && query.getQueryRequest().getHint().isEmpty() &&
+ query.getQueryRequest().getCollation().isEmpty() && !query.getQueryRequest().getSkip() &&
+ CanonicalQuery::isSimpleIdQuery(query.getQueryRequest().getFilter()) &&
+ !query.getQueryRequest().isTailable();
}
unique_ptr<PlanStageStats> IDHackStage::getStats() {
diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp
index 7f9185435c9..79bb271e00a 100644
--- a/src/mongo/db/exec/multi_plan.cpp
+++ b/src/mongo/db/exec/multi_plan.cpp
@@ -190,10 +190,11 @@ size_t MultiPlanStage::getTrialPeriodNumToReturn(const CanonicalQuery& query) {
// Determine the number of results which we will produce during the plan
// ranking phase before stopping.
size_t numResults = static_cast<size_t>(internalQueryPlanEvaluationMaxResults);
- if (query.getParsed().getNToReturn()) {
- numResults = std::min(static_cast<size_t>(*query.getParsed().getNToReturn()), numResults);
- } else if (query.getParsed().getLimit()) {
- numResults = std::min(static_cast<size_t>(*query.getParsed().getLimit()), numResults);
+ if (query.getQueryRequest().getNToReturn()) {
+ numResults =
+ std::min(static_cast<size_t>(*query.getQueryRequest().getNToReturn()), numResults);
+ } else if (query.getQueryRequest().getLimit()) {
+ numResults = std::min(static_cast<size_t>(*query.getQueryRequest().getLimit()), numResults);
}
return numResults;
diff --git a/src/mongo/db/exec/projection_exec.cpp b/src/mongo/db/exec/projection_exec.cpp
index 383edf740f7..0bc0809e01a 100644
--- a/src/mongo/db/exec/projection_exec.cpp
+++ b/src/mongo/db/exec/projection_exec.cpp
@@ -32,7 +32,7 @@
#include "mongo/db/matcher/expression.h"
#include "mongo/db/matcher/expression_parser.h"
#include "mongo/db/query/collation/collator_interface.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/util/mongoutils/str.h"
namespace mongo {
@@ -138,18 +138,18 @@ ProjectionExec::ProjectionExec(const BSONObj& spec,
add(e.fieldName(), true);
} else if (mongoutils::str::equals(e2.fieldName(), "$meta")) {
verify(String == e2.type());
- if (e2.valuestr() == LiteParsedQuery::metaTextScore) {
+ if (e2.valuestr() == QueryRequest::metaTextScore) {
_meta[e.fieldName()] = META_TEXT_SCORE;
- } else if (e2.valuestr() == LiteParsedQuery::metaSortKey) {
+ } else if (e2.valuestr() == QueryRequest::metaSortKey) {
_sortKeyMetaFields.push_back(e.fieldName());
_meta[_sortKeyMetaFields.back()] = META_SORT_KEY;
- } else if (e2.valuestr() == LiteParsedQuery::metaRecordId) {
+ } else if (e2.valuestr() == QueryRequest::metaRecordId) {
_meta[e.fieldName()] = META_RECORDID;
- } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
+ } else if (e2.valuestr() == QueryRequest::metaGeoNearPoint) {
_meta[e.fieldName()] = META_GEONEAR_POINT;
- } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
+ } else if (e2.valuestr() == QueryRequest::metaGeoNearDistance) {
_meta[e.fieldName()] = META_GEONEAR_DIST;
- } else if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
+ } else if (e2.valuestr() == QueryRequest::metaIndexKey) {
_hasReturnKey = true;
} else {
// This shouldn't happen, should be caught by parsing.
diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp
index c7378c531de..91dabd1ce46 100644
--- a/src/mongo/db/exec/sort.cpp
+++ b/src/mongo/db/exec/sort.cpp
@@ -39,7 +39,6 @@
#include "mongo/db/index/btree_key_generator.h"
#include "mongo/db/index_names.h"
#include "mongo/db/query/find_common.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/query/query_knobs.h"
#include "mongo/db/query/query_planner.h"
#include "mongo/stdx/memory.h"
diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp
index 28b0d33c1a3..b04849bb046 100644
--- a/src/mongo/db/exec/sort_key_generator.cpp
+++ b/src/mongo/db/exec/sort_key_generator.cpp
@@ -74,7 +74,7 @@ SortKeyGenerator::SortKeyGenerator(OperationContext* txn,
if (elt.isNumber()) {
// Btree key. elt (should be) foo: 1 or foo: -1.
btreeBob.append(elt);
- } else if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ } else if (QueryRequest::isTextScoreMeta(elt)) {
_sortHasMeta = true;
} else {
// Sort spec. should have been validated before here.
@@ -139,7 +139,7 @@ Status SortKeyGenerator::getSortKey(const WorkingSetMember& member, BSONObj* obj
if (elt.isNumber()) {
// Merge btree key elt.
mergedKeyBob.append(sortKeyIt.next());
- } else if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ } else if (QueryRequest::isTextScoreMeta(elt)) {
// Add text score metadata
double score = 0.0;
if (member.hasComputed(WSM_COMPUTED_TEXT_SCORE)) {
@@ -245,14 +245,14 @@ void SortKeyGenerator::getBoundsForSort(OperationContext* txn,
_collator);
params.indices.push_back(sortOrder);
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString("fake.ns"));
- lpq->setFilter(queryObj);
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString("fake.ns"));
+ qr->setFilter(queryObj);
if (_collator) {
- lpq->setCollation(CollationSerializer::specToBSON(_collator->getSpec()));
+ qr->setCollation(CollationSerializer::specToBSON(_collator->getSpec()));
}
auto statusWithQueryForSort =
- CanonicalQuery::canonicalize(txn, std::move(lpq), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
verify(statusWithQueryForSort.isOK());
std::unique_ptr<CanonicalQuery> queryForSort = std::move(statusWithQueryForSort.getValue());
diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp
index 5d1fee15703..d961e64e134 100644
--- a/src/mongo/db/exec/subplan.cpp
+++ b/src/mongo/db/exec/subplan.cpp
@@ -92,33 +92,33 @@ bool isContainedOr(const MatchExpression* expr) {
} // namespace
bool SubplanStage::canUseSubplanning(const CanonicalQuery& query) {
- const LiteParsedQuery& lpq = query.getParsed();
+ const QueryRequest& qr = query.getQueryRequest();
const MatchExpression* expr = query.root();
// Hint provided
- if (!lpq.getHint().isEmpty()) {
+ if (!qr.getHint().isEmpty()) {
return false;
}
// Min provided
// Min queries are a special case of hinted queries.
- if (!lpq.getMin().isEmpty()) {
+ if (!qr.getMin().isEmpty()) {
return false;
}
// Max provided
// Similar to min, max queries are a special case of hinted queries.
- if (!lpq.getMax().isEmpty()) {
+ if (!qr.getMax().isEmpty()) {
return false;
}
// Tailable cursors won't get cached, just turn into collscans.
- if (query.getParsed().isTailable()) {
+ if (query.getQueryRequest().isTailable()) {
return false;
}
// Snapshot is really a hint.
- if (query.getParsed().isSnapshot()) {
+ if (query.getQueryRequest().isSnapshot()) {
return false;
}
@@ -167,7 +167,7 @@ Status SubplanStage::planSubqueries() {
_orExpression = _query->root()->shallowClone();
if (isContainedOr(_orExpression.get())) {
_orExpression = rewriteToRootedOr(std::move(_orExpression));
- invariant(CanonicalQuery::isValid(_orExpression.get(), _query->getParsed()).isOK());
+ invariant(CanonicalQuery::isValid(_orExpression.get(), _query->getQueryRequest()).isOK());
}
for (size_t i = 0; i < _plannerParams.indices.size(); ++i) {
diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp
index b2f723e455e..b22441e94e5 100644
--- a/src/mongo/db/ops/parsed_delete.cpp
+++ b/src/mongo/db/ops/parsed_delete.cpp
@@ -77,11 +77,11 @@ Status ParsedDelete::parseQueryToCQ() {
// The projection needs to be applied after the delete operation, so we do not specify a
// projection during canonicalization.
- auto lpq = stdx::make_unique<LiteParsedQuery>(_request->getNamespaceString());
- lpq->setFilter(_request->getQuery());
- lpq->setSort(_request->getSort());
- lpq->setCollation(_request->getCollation());
- lpq->setExplain(_request->isExplain());
+ auto qr = stdx::make_unique<QueryRequest>(_request->getNamespaceString());
+ qr->setFilter(_request->getQuery());
+ qr->setSort(_request->getSort());
+ qr->setCollation(_request->getCollation());
+ qr->setExplain(_request->isExplain());
// Limit should only used for the findAndModify command when a sort is specified. If a sort
// is requested, we want to use a top-k sort for efficiency reasons, so should pass the
@@ -90,10 +90,10 @@ Status ParsedDelete::parseQueryToCQ() {
// has not actually deleted a document. This behavior is fine for findAndModify, but should
// not apply to deletes in general.
if (!_request->isMulti() && !_request->getSort().isEmpty()) {
- lpq->setLimit(1);
+ qr->setLimit(1);
}
- auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(qr), extensionsCallback);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
@@ -118,7 +118,7 @@ PlanExecutor::YieldPolicy ParsedDelete::yieldPolicy() const {
bool ParsedDelete::isIsolated() const {
return _canonicalQuery.get() ? _canonicalQuery->isIsolated()
- : LiteParsedQuery::isQueryIsolated(_request->getQuery());
+ : QueryRequest::isQueryIsolated(_request->getQuery());
}
bool ParsedDelete::hasParsedQuery() const {
diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp
index bccd6491048..a6cb8b97a81 100644
--- a/src/mongo/db/ops/parsed_update.cpp
+++ b/src/mongo/db/ops/parsed_update.cpp
@@ -92,11 +92,11 @@ Status ParsedUpdate::parseQueryToCQ() {
// The projection needs to be applied after the update operation, so we do not specify a
// projection during canonicalization.
- auto lpq = stdx::make_unique<LiteParsedQuery>(_request->getNamespaceString());
- lpq->setFilter(_request->getQuery());
- lpq->setSort(_request->getSort());
- lpq->setCollation(_request->getCollation());
- lpq->setExplain(_request->isExplain());
+ auto qr = stdx::make_unique<QueryRequest>(_request->getNamespaceString());
+ qr->setFilter(_request->getQuery());
+ qr->setSort(_request->getSort());
+ qr->setCollation(_request->getCollation());
+ qr->setExplain(_request->isExplain());
// Limit should only used for the findAndModify command when a sort is specified. If a sort
// is requested, we want to use a top-k sort for efficiency reasons, so should pass the
@@ -106,10 +106,10 @@ Status ParsedUpdate::parseQueryToCQ() {
// not apply to update in general.
// TODO SERVER-23473: Pass the collation to canonicalize().
if (!_request->isMulti() && !_request->getSort().isEmpty()) {
- lpq->setLimit(1);
+ qr->setLimit(1);
}
- auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(_txn, std::move(qr), extensionsCallback);
if (statusWithCQ.isOK()) {
_canonicalQuery = std::move(statusWithCQ.getValue());
}
@@ -146,7 +146,7 @@ PlanExecutor::YieldPolicy ParsedUpdate::yieldPolicy() const {
bool ParsedUpdate::isIsolated() const {
return _canonicalQuery.get() ? _canonicalQuery->isIsolated()
- : LiteParsedQuery::isQueryIsolated(_request->getQuery());
+ : QueryRequest::isQueryIsolated(_request->getQuery());
}
bool ParsedUpdate::hasParsedQuery() const {
diff --git a/src/mongo/db/ops/update_driver.cpp b/src/mongo/db/ops/update_driver.cpp
index fad283493cd..b488bad70e7 100644
--- a/src/mongo/db/ops/update_driver.cpp
+++ b/src/mongo/db/ops/update_driver.cpp
@@ -180,9 +180,9 @@ Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* txn,
// We canonicalize the query to collapse $and/$or, and the namespace is not needed. Also,
// because this is for the upsert case, where we insert a new document if one was not found, the
// $where/$text clauses do not make sense, hence empty ExtensionsCallback.
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString(""));
- lpq->setFilter(query);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), ExtensionsCallbackNoop());
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
+ qr->setFilter(query);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp
index c1ad28bc375..00b74b7d9b9 100644
--- a/src/mongo/db/pipeline/pipeline.cpp
+++ b/src/mongo/db/pipeline/pipeline.cpp
@@ -89,7 +89,7 @@ intrusive_ptr<Pipeline> Pipeline::parseCommand(string& errmsg,
}
// maxTimeMS is also for the command processor.
- if (str::equals(pFieldName, LiteParsedQuery::cmdOptionMaxTimeMS)) {
+ if (str::equals(pFieldName, QueryRequest::cmdOptionMaxTimeMS)) {
continue;
}
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index f30e513a2f4..0407a93694f 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -209,17 +209,17 @@ StatusWith<std::unique_ptr<PlanExecutor>> attemptToGetExecutor(
BSONObj projectionObj,
BSONObj sortObj,
const size_t plannerOpts) {
- auto lpq = stdx::make_unique<LiteParsedQuery>(pExpCtx->ns);
- lpq->setFilter(queryObj);
- lpq->setProj(projectionObj);
- lpq->setSort(sortObj);
+ auto qr = stdx::make_unique<QueryRequest>(pExpCtx->ns);
+ qr->setFilter(queryObj);
+ qr->setProj(projectionObj);
+ qr->setSort(sortObj);
if (pExpCtx->collator) {
- lpq->setCollation(CollationSerializer::specToBSON(pExpCtx->collator->getSpec()));
+ qr->setCollation(CollationSerializer::specToBSON(pExpCtx->collator->getSpec()));
}
const ExtensionsCallbackReal extensionsCallback(pExpCtx->opCtx, &pExpCtx->ns);
- auto cq = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto cq = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!cq.isOK()) {
// Return an error instead of uasserting, since there are cases where the combination of
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 676f27d3cd5..607efa0b8a6 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -82,7 +82,7 @@ env.Library(
"$BUILD_DIR/mongo/base",
"$BUILD_DIR/mongo/util/fail_point",
"collation/collator_icu",
- "lite_parsed_query",
+ "query_request",
],
)
@@ -138,7 +138,7 @@ env.Library(
'$BUILD_DIR/mongo/db/namespace_string',
'$BUILD_DIR/mongo/db/repl/optime',
'$BUILD_DIR/mongo/rpc/command_status',
- 'lite_parsed_query',
+ 'query_request',
]
)
@@ -174,9 +174,9 @@ env.Library(
)
env.Library(
- target="lite_parsed_query",
+ target="query_request",
source=[
- "lite_parsed_query.cpp"
+ "query_request.cpp"
],
LIBDEPS=[
"$BUILD_DIR/mongo/base",
@@ -262,12 +262,12 @@ env.CppUnitTest(
)
env.CppUnitTest(
- target="lite_parsed_query_test",
+ target="query_request_test",
source=[
- "lite_parsed_query_test.cpp"
+ "query_request_test.cpp"
],
LIBDEPS=[
- "lite_parsed_query",
+ "query_request",
],
)
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 360e796708f..653f73b51f9 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -101,29 +101,29 @@ bool matchExpressionLessThan(const MatchExpression* lhs, const MatchExpression*
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
OperationContext* txn, const QueryMessage& qm, const ExtensionsCallback& extensionsCallback) {
- // Make LiteParsedQuery.
- auto lpqStatus = LiteParsedQuery::fromLegacyQueryMessage(qm);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
+ // Make QueryRequest.
+ auto qrStatus = QueryRequest::fromLegacyQueryMessage(qm);
+ if (!qrStatus.isOK()) {
+ return qrStatus.getStatus();
}
- return CanonicalQuery::canonicalize(txn, std::move(lpqStatus.getValue()), extensionsCallback);
+ return CanonicalQuery::canonicalize(txn, std::move(qrStatus.getValue()), extensionsCallback);
}
// static
StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
OperationContext* txn,
- std::unique_ptr<LiteParsedQuery> lpq,
+ std::unique_ptr<QueryRequest> qr,
const ExtensionsCallback& extensionsCallback) {
- auto lpqStatus = lpq->validate();
- if (!lpqStatus.isOK()) {
- return lpqStatus;
+ auto qrStatus = qr->validate();
+ if (!qrStatus.isOK()) {
+ return qrStatus;
}
std::unique_ptr<CollatorInterface> collator;
- if (!lpq->getCollation().isEmpty()) {
+ if (!qr->getCollation().isEmpty()) {
auto statusWithCollator = CollatorFactoryInterface::get(txn->getServiceContext())
- ->makeFromBSON(lpq->getCollation());
+ ->makeFromBSON(qr->getCollation());
if (!statusWithCollator.isOK()) {
return statusWithCollator.getStatus();
}
@@ -132,7 +132,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
// Make MatchExpression.
StatusWithMatchExpression statusWithMatcher =
- MatchExpressionParser::parse(lpq->getFilter(), extensionsCallback, collator.get());
+ MatchExpressionParser::parse(qr->getFilter(), extensionsCallback, collator.get());
if (!statusWithMatcher.isOK()) {
return statusWithMatcher.getStatus();
}
@@ -142,7 +142,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
Status initStatus =
- cq->init(std::move(lpq), extensionsCallback, me.release(), std::move(collator));
+ cq->init(std::move(qr), extensionsCallback, me.release(), std::move(collator));
if (!initStatus.isOK()) {
return initStatus;
@@ -156,17 +156,17 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
const CanonicalQuery& baseQuery,
MatchExpression* root,
const ExtensionsCallback& extensionsCallback) {
- // TODO: we should be passing the filter corresponding to 'root' to the LPQ rather than the base
- // query's filter, baseQuery.getParsed().getFilter().
- auto lpq = stdx::make_unique<LiteParsedQuery>(baseQuery.nss());
- lpq->setFilter(baseQuery.getParsed().getFilter());
- lpq->setProj(baseQuery.getParsed().getProj());
- lpq->setSort(baseQuery.getParsed().getSort());
- lpq->setCollation(baseQuery.getParsed().getCollation());
- lpq->setExplain(baseQuery.getParsed().isExplain());
- auto lpqStatus = lpq->validate();
- if (!lpqStatus.isOK()) {
- return lpqStatus;
+ // TODO: we should be passing the filter corresponding to 'root' to the QR rather than the base
+ // query's filter, baseQuery.getQueryRequest().getFilter().
+ auto qr = stdx::make_unique<QueryRequest>(baseQuery.nss());
+ qr->setFilter(baseQuery.getQueryRequest().getFilter());
+ qr->setProj(baseQuery.getQueryRequest().getProj());
+ qr->setSort(baseQuery.getQueryRequest().getSort());
+ qr->setCollation(baseQuery.getQueryRequest().getCollation());
+ qr->setExplain(baseQuery.getQueryRequest().isExplain());
+ auto qrStatus = qr->validate();
+ if (!qrStatus.isOK()) {
+ return qrStatus;
}
std::unique_ptr<CollatorInterface> collator;
@@ -177,7 +177,7 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
// Make the CQ we'll hopefully return.
std::unique_ptr<CanonicalQuery> cq(new CanonicalQuery());
Status initStatus = cq->init(
- std::move(lpq), extensionsCallback, root->shallowClone().release(), std::move(collator));
+ std::move(qr), extensionsCallback, root->shallowClone().release(), std::move(collator));
if (!initStatus.isOK()) {
return initStatus;
@@ -185,38 +185,38 @@ StatusWith<std::unique_ptr<CanonicalQuery>> CanonicalQuery::canonicalize(
return std::move(cq);
}
-Status CanonicalQuery::init(std::unique_ptr<LiteParsedQuery> lpq,
+Status CanonicalQuery::init(std::unique_ptr<QueryRequest> qr,
const ExtensionsCallback& extensionsCallback,
MatchExpression* root,
std::unique_ptr<CollatorInterface> collator) {
- _pq = std::move(lpq);
+ _qr = std::move(qr);
_collator = std::move(collator);
_hasNoopExtensions = extensionsCallback.hasNoopExtensions();
- _isIsolated = LiteParsedQuery::isQueryIsolated(_pq->getFilter());
+ _isIsolated = QueryRequest::isQueryIsolated(_qr->getFilter());
// Normalize, sort and validate tree.
root = normalizeTree(root);
sortTree(root);
_root.reset(root);
- Status validStatus = isValid(root, *_pq);
+ Status validStatus = isValid(root, *_qr);
if (!validStatus.isOK()) {
return validStatus;
}
// Validate the projection if there is one.
- if (!_pq->getProj().isEmpty()) {
+ if (!_qr->getProj().isEmpty()) {
ParsedProjection* pp;
Status projStatus =
- ParsedProjection::make(_pq->getProj(), _root.get(), &pp, extensionsCallback);
+ ParsedProjection::make(_qr->getProj(), _root.get(), &pp, extensionsCallback);
if (!projStatus.isOK()) {
return projStatus;
}
_proj.reset(pp);
}
- if (_proj && _proj->wantSortKey() && _pq->getSort().isEmpty()) {
+ if (_proj && _proj->wantSortKey() && _qr->getSort().isEmpty()) {
return Status(ErrorCodes::BadValue, "cannot use sortKey $meta projection without a sort");
}
@@ -387,7 +387,7 @@ bool hasNodeInSubtree(MatchExpression* root,
}
// static
-Status CanonicalQuery::isValid(MatchExpression* root, const LiteParsedQuery& parsed) {
+Status CanonicalQuery::isValid(MatchExpression* root, const QueryRequest& parsed) {
// Analysis below should be done after squashing the tree to make it clearer.
// There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR.
@@ -485,50 +485,50 @@ Status CanonicalQuery::isValid(MatchExpression* root, const LiteParsedQuery& par
std::string CanonicalQuery::toString() const {
str::stream ss;
- ss << "ns=" << _pq->ns();
+ ss << "ns=" << _qr->ns();
- if (_pq->getBatchSize()) {
- ss << " batchSize=" << *_pq->getBatchSize();
+ if (_qr->getBatchSize()) {
+ ss << " batchSize=" << *_qr->getBatchSize();
}
- if (_pq->getLimit()) {
- ss << " limit=" << *_pq->getLimit();
+ if (_qr->getLimit()) {
+ ss << " limit=" << *_qr->getLimit();
}
- if (_pq->getSkip()) {
- ss << " skip=" << *_pq->getSkip();
+ if (_qr->getSkip()) {
+ ss << " skip=" << *_qr->getSkip();
}
- if (_pq->getNToReturn()) {
- ss << " ntoreturn=" << *_pq->getNToReturn() << '\n';
+ if (_qr->getNToReturn()) {
+ ss << " ntoreturn=" << *_qr->getNToReturn() << '\n';
}
// The expression tree puts an endl on for us.
ss << "Tree: " << _root->toString();
- ss << "Sort: " << _pq->getSort().toString() << '\n';
- ss << "Proj: " << _pq->getProj().toString() << '\n';
+ ss << "Sort: " << _qr->getSort().toString() << '\n';
+ ss << "Proj: " << _qr->getProj().toString() << '\n';
return ss;
}
std::string CanonicalQuery::toStringShort() const {
str::stream ss;
- ss << "query: " << _pq->getFilter().toString() << " sort: " << _pq->getSort().toString()
- << " projection: " << _pq->getProj().toString();
+ ss << "query: " << _qr->getFilter().toString() << " sort: " << _qr->getSort().toString()
+ << " projection: " << _qr->getProj().toString();
- if (_pq->getBatchSize()) {
- ss << " batchSize: " << *_pq->getBatchSize();
+ if (_qr->getBatchSize()) {
+ ss << " batchSize: " << *_qr->getBatchSize();
}
- if (_pq->getLimit()) {
- ss << " limit: " << *_pq->getLimit();
+ if (_qr->getLimit()) {
+ ss << " limit: " << *_qr->getLimit();
}
- if (_pq->getSkip()) {
- ss << " skip: " << *_pq->getSkip();
+ if (_qr->getSkip()) {
+ ss << " skip: " << *_qr->getSkip();
}
- if (_pq->getNToReturn()) {
- ss << " ntoreturn=" << *_pq->getNToReturn();
+ if (_qr->getNToReturn()) {
+ ss << " ntoreturn=" << *_qr->getNToReturn();
}
return ss;
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index d53c84424f4..ac2e6c2b4a9 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -34,8 +34,8 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/matcher/expression.h"
#include "mongo/db/query/collation/collator_interface.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/query/parsed_projection.h"
+#include "mongo/db/query/query_request.h"
namespace mongo {
@@ -65,7 +65,7 @@ public:
* CanonicalQuery.
*/
static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(
- OperationContext* txn, std::unique_ptr<LiteParsedQuery> lpq, const ExtensionsCallback&);
+ OperationContext* txn, std::unique_ptr<QueryRequest> qr, const ExtensionsCallback&);
/**
* For testing or for internal clients to use.
@@ -91,10 +91,10 @@ public:
static bool isSimpleIdQuery(const BSONObj& query);
const NamespaceString& nss() const {
- return _pq->nss();
+ return _qr->nss();
}
const std::string& ns() const {
- return _pq->nss().ns();
+ return _qr->nss().ns();
}
//
@@ -104,10 +104,10 @@ public:
return _root.get();
}
BSONObj getQueryObj() const {
- return _pq->getFilter();
+ return _qr->getFilter();
}
- const LiteParsedQuery& getParsed() const {
- return *_pq;
+ const QueryRequest& getQueryRequest() const {
+ return *_qr;
}
const ParsedProjection* getProj() const {
return _proj.get();
@@ -123,13 +123,13 @@ public:
/**
* Validates match expression, checking for certain
* combinations of operators in match expression and
- * query options in LiteParsedQuery.
- * Since 'root' is derived from 'filter' in LiteParsedQuery,
+ * query options in QueryRequest.
+ * Since 'root' is derived from 'filter' in QueryRequest,
* 'filter' is not validated.
*
* TODO: Move this to query_validator.cpp
*/
- static Status isValid(MatchExpression* root, const LiteParsedQuery& parsed);
+ static Status isValid(MatchExpression* root, const QueryRequest& parsed);
/**
* Returns the normalized version of the subtree rooted at 'root'.
@@ -172,14 +172,14 @@ private:
// You must go through canonicalize to create a CanonicalQuery.
CanonicalQuery() {}
- Status init(std::unique_ptr<LiteParsedQuery> lpq,
+ Status init(std::unique_ptr<QueryRequest> qr,
const ExtensionsCallback& extensionsCallback,
MatchExpression* root,
std::unique_ptr<CollatorInterface> collator);
- std::unique_ptr<LiteParsedQuery> _pq;
+ std::unique_ptr<QueryRequest> _qr;
- // _root points into _pq->getFilter()
+ // _root points into _qr->getFilter()
std::unique_ptr<MatchExpression> _root;
std::unique_ptr<ParsedProjection> _proj;
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index c5e54fedf31..3aa2d1b1b28 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -67,13 +67,13 @@ MatchExpression* parseMatchExpression(const BSONObj& obj) {
/**
* Helper function which parses and normalizes 'queryStr', and returns whether the given
- * (expression tree, lite parsed query) tuple passes CanonicalQuery::isValid().
+ * (expression tree, query request) tuple passes CanonicalQuery::isValid().
* Returns Status::OK() if the tuple is valid, else returns an error Status.
*/
-Status isValid(const std::string& queryStr, const LiteParsedQuery& lpqRaw) {
+Status isValid(const std::string& queryStr, const QueryRequest& qrRaw) {
BSONObj queryObj = fromjson(queryStr);
unique_ptr<MatchExpression> me(CanonicalQuery::normalizeTree(parseMatchExpression(queryObj)));
- return CanonicalQuery::isValid(me.get(), lpqRaw);
+ return CanonicalQuery::isValid(me.get(), qrRaw);
}
void assertEquivalent(const char* queryStr,
@@ -104,12 +104,12 @@ void assertNotEquivalent(const char* queryStr,
TEST(CanonicalQueryTest, IsValidText) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ ASSERT_OK(qr->validate());
// Valid: regular TEXT.
- ASSERT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+ ASSERT_OK(isValid("{$text: {$search: 's'}}", *qr));
// Valid: TEXT inside OR.
ASSERT_OK(
@@ -117,13 +117,13 @@ TEST(CanonicalQueryTest, IsValidText) {
" {$text: {$search: 's'}},"
" {a: 1}"
"]}",
- *lpq));
+ *qr));
// Valid: TEXT outside NOR.
- ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *lpq));
+ ASSERT_OK(isValid("{$text: {$search: 's'}, $nor: [{a: 1}, {b: 1}]}", *qr));
// Invalid: TEXT inside NOR.
- ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *lpq));
+ ASSERT_NOT_OK(isValid("{$nor: [{$text: {$search: 's'}}, {a: 1}]}", *qr));
// Invalid: TEXT inside NOR.
ASSERT_NOT_OK(
@@ -134,7 +134,7 @@ TEST(CanonicalQueryTest, IsValidText) {
" ]},"
" {a: 2}"
"]}",
- *lpq));
+ *qr));
// Invalid: >1 TEXT.
ASSERT_NOT_OK(
@@ -142,7 +142,7 @@ TEST(CanonicalQueryTest, IsValidText) {
" {$text: {$search: 's'}},"
" {$text: {$search: 't'}}"
"]}",
- *lpq));
+ *qr));
// Invalid: >1 TEXT.
ASSERT_NOT_OK(
@@ -156,26 +156,26 @@ TEST(CanonicalQueryTest, IsValidText) {
" {b: 1}"
" ]}"
"]}",
- *lpq));
+ *qr));
}
TEST(CanonicalQueryTest, IsValidTextTailable) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setTailable(true);
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setTailable(true);
+ ASSERT_OK(qr->validate());
// Invalid: TEXT and tailable.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *qr));
}
TEST(CanonicalQueryTest, IsValidGeo) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ ASSERT_OK(qr->validate());
// Valid: regular GEO_NEAR.
- ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *lpq));
+ ASSERT_OK(isValid("{a: {$near: [0, 0]}}", *qr));
// Valid: GEO_NEAR inside nested AND.
ASSERT_OK(
@@ -186,7 +186,7 @@ TEST(CanonicalQueryTest, IsValidGeo) {
" ]},"
" {c: 1}"
"]}",
- *lpq));
+ *qr));
// Invalid: >1 GEO_NEAR.
ASSERT_NOT_OK(
@@ -194,7 +194,7 @@ TEST(CanonicalQueryTest, IsValidGeo) {
" {a: {$near: [0, 0]}},"
" {b: {$near: [0, 0]}}"
"]}",
- *lpq));
+ *qr));
// Invalid: >1 GEO_NEAR.
ASSERT_NOT_OK(
@@ -202,7 +202,7 @@ TEST(CanonicalQueryTest, IsValidGeo) {
" {a: {$geoNear: [0, 0]}},"
" {b: {$near: [0, 0]}}"
"]}",
- *lpq));
+ *qr));
// Invalid: >1 GEO_NEAR.
ASSERT_NOT_OK(
@@ -216,7 +216,7 @@ TEST(CanonicalQueryTest, IsValidGeo) {
" {d: 1}"
" ]}"
"]}",
- *lpq));
+ *qr));
// Invalid: GEO_NEAR inside NOR.
ASSERT_NOT_OK(
@@ -224,7 +224,7 @@ TEST(CanonicalQueryTest, IsValidGeo) {
" {a: {$near: [0, 0]}},"
" {b: 1}"
"]}",
- *lpq));
+ *qr));
// Invalid: GEO_NEAR inside OR.
ASSERT_NOT_OK(
@@ -232,19 +232,19 @@ TEST(CanonicalQueryTest, IsValidGeo) {
" {a: {$near: [0, 0]}},"
" {b: 1}"
"]}",
- *lpq));
+ *qr));
}
TEST(CanonicalQueryTest, IsValidTextAndGeo) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ ASSERT_OK(qr->validate());
// Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$near: [0, 0]}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$near: [0, 0]}}", *qr));
// Invalid: TEXT and GEO_NEAR.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}, a: {$geoNear: [0, 0]}}", *qr));
// Invalid: TEXT and GEO_NEAR.
ASSERT_NOT_OK(
@@ -253,69 +253,69 @@ TEST(CanonicalQueryTest, IsValidTextAndGeo) {
" {a: 1}"
" ],"
" b: {$near: [0, 0]}}",
- *lpq));
+ *qr));
}
TEST(CanonicalQueryTest, IsValidTextAndNaturalAscending) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setSort(fromjson("{$natural: 1}"));
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setSort(fromjson("{$natural: 1}"));
+ ASSERT_OK(qr->validate());
// Invalid: TEXT and {$natural: 1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *qr));
}
TEST(CanonicalQueryTest, IsValidTextAndNaturalDescending) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setSort(fromjson("{$natural: -1}"));
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setSort(fromjson("{$natural: -1}"));
+ ASSERT_OK(qr->validate());
// Invalid: TEXT and {$natural: -1} sort order.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *qr));
}
TEST(CanonicalQueryTest, IsValidTextAndHint) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setHint(fromjson("{a: 1}"));
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setHint(fromjson("{a: 1}"));
+ ASSERT_OK(qr->validate());
    // Invalid: TEXT and index hint.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *qr));
}
// SERVER-14366
TEST(CanonicalQueryTest, IsValidGeoNearNaturalSort) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setSort(fromjson("{$natural: 1}"));
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setSort(fromjson("{$natural: 1}"));
+ ASSERT_OK(qr->validate());
// Invalid: GEO_NEAR and {$natural: 1} sort order.
- ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *lpq));
+ ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *qr));
}
// SERVER-14366
TEST(CanonicalQueryTest, IsValidGeoNearNaturalHint) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setHint(fromjson("{$natural: 1}"));
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setHint(fromjson("{$natural: 1}"));
+ ASSERT_OK(qr->validate());
// Invalid: GEO_NEAR and {$natural: 1} hint.
- ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *lpq));
+ ASSERT_NOT_OK(isValid("{a: {$near: {$geometry: {type: 'Point', coordinates: [0, 0]}}}}", *qr));
}
TEST(CanonicalQueryTest, IsValidTextAndSnapshot) {
- // Filter inside LiteParsedQuery is not used.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setSnapshot(true);
- ASSERT_OK(lpq->validate());
+ // Filter inside QueryRequest is not used.
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setSnapshot(true);
+ ASSERT_OK(qr->validate());
// Invalid: TEXT and snapshot.
- ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *lpq));
+ ASSERT_NOT_OK(isValid("{$text: {$search: 's'}}", *qr));
}
TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
@@ -325,51 +325,51 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
// Passing a sortKey meta-projection without a sort is an error.
{
const bool isExplain = false;
- auto lpq = assertGet(LiteParsedQuery::makeFromFindCommand(
+ auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss, fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}}"), isExplain));
auto cq = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_NOT_OK(cq.getStatus());
}
// Should be able to successfully create a CQ when there is a sort.
{
const bool isExplain = false;
- auto lpq = assertGet(LiteParsedQuery::makeFromFindCommand(
+ auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss,
fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}, sort: {bar: 1}}"),
isExplain));
auto cq = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(cq.getStatus());
}
}
TEST(CanonicalQueryTest, IsValidNaturalSortIndexHint) {
const bool isExplain = false;
- auto lpq = assertGet(LiteParsedQuery::makeFromFindCommand(
+ auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss, fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {a: 1}}"), isExplain));
// Invalid: {$natural: 1} sort order and index hint.
- ASSERT_NOT_OK(isValid("{}", *lpq));
+ ASSERT_NOT_OK(isValid("{}", *qr));
}
TEST(CanonicalQueryTest, IsValidNaturalSortNaturalHint) {
const bool isExplain = false;
- auto lpq = assertGet(LiteParsedQuery::makeFromFindCommand(
+ auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss, fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {$natural: 1}}"), isExplain));
// Valid: {$natural: 1} sort order and {$natural: 1} hint.
- ASSERT_OK(isValid("{}", *lpq));
+ ASSERT_OK(isValid("{}", *qr));
}
TEST(CanonicalQueryTest, IsValidNaturalSortNaturalHintDifferentDirections) {
const bool isExplain = false;
- auto lpq = assertGet(LiteParsedQuery::makeFromFindCommand(
+ auto qr = assertGet(QueryRequest::makeFromFindCommand(
nss, fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {$natural: -1}}"), isExplain));
// Invalid: {$natural: 1} sort order and {$natural: -1} hint.
- ASSERT_NOT_OK(isValid("{}", *lpq));
+ ASSERT_NOT_OK(isValid("{}", *qr));
}
//
@@ -435,10 +435,10 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -449,12 +449,12 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -561,9 +561,9 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
const bool isExplain = true;
const std::string cmdStr =
"{find:'bogusns', filter:{$or:[{a:1,b:1},{a:1,c:1}]}, projection:{a:1}, sort:{b:1}}";
- auto lpq = assertGet(LiteParsedQuery::makeFromFindCommand(nss, fromjson(cmdStr), isExplain));
+ auto qr = assertGet(QueryRequest::makeFromFindCommand(nss, fromjson(cmdStr), isExplain));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
@@ -571,32 +571,32 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
// Descriptive test. The childCq's filter should be the relevant $or clause, rather than the
// entire query predicate.
- ASSERT_EQ(childCq->getParsed().getFilter(), baseCq->getParsed().getFilter());
+ ASSERT_EQ(childCq->getQueryRequest().getFilter(), baseCq->getQueryRequest().getFilter());
- ASSERT_EQ(childCq->getParsed().getProj(), baseCq->getParsed().getProj());
- ASSERT_EQ(childCq->getParsed().getSort(), baseCq->getParsed().getSort());
- ASSERT_TRUE(childCq->getParsed().isExplain());
+ ASSERT_EQ(childCq->getQueryRequest().getProj(), baseCq->getQueryRequest().getProj());
+ ASSERT_EQ(childCq->getQueryRequest().getSort(), baseCq->getQueryRequest().getSort());
+ ASSERT_TRUE(childCq->getQueryRequest().isExplain());
}
-TEST(CanonicalQueryTest, CanonicalQueryFromLPQWithNoCollation) {
+TEST(CanonicalQueryTest, CanonicalQueryFromQRWithNoCollation) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
ASSERT_TRUE(cq->getCollator() == nullptr);
}
-TEST(CanonicalQueryTest, CanonicalQueryFromLPQWithCollation) {
+TEST(CanonicalQueryTest, CanonicalQueryFromQRWithCollation) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setCollation(BSON("locale"
- << "reverse"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setCollation(BSON("locale"
+ << "reverse"));
auto cq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
ASSERT_TRUE(CollatorInterface::collatorsMatch(cq->getCollator(), &collator));
}
@@ -605,10 +605,10 @@ TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithNoCollation) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
@@ -620,12 +620,12 @@ TEST(CanonicalQueryTest, CanonicalQueryFromBaseQueryWithCollation) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
- lpq->setCollation(BSON("locale"
- << "reverse"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{$or:[{a:1,b:1},{a:1,c:1}]}"));
+ qr->setCollation(BSON("locale"
+ << "reverse"));
auto baseCq = assertGet(CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
auto childCq = assertGet(CanonicalQuery::canonicalize(
txn.get(), *baseCq, firstClauseExpr, ExtensionsCallbackDisallowExtensions()));
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index 88f09fcfa19..5b365500e16 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -88,12 +88,12 @@ bool shouldSaveCursor(OperationContext* txn,
return false;
}
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
- if (!pq.wantMore() && !pq.isTailable()) {
+ const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
+ if (!qr.wantMore() && !qr.isTailable()) {
return false;
}
- if (pq.getNToReturn().value_or(0) == 1) {
+ if (qr.getNToReturn().value_or(0) == 1) {
return false;
}
@@ -103,7 +103,7 @@ bool shouldSaveCursor(OperationContext* txn,
// SERVER-13955: we should be able to create a tailable cursor that waits on
// an empty collection. Right now we do not keep a cursor if the collection
// has zero records.
- if (pq.isTailable()) {
+ if (qr.isTailable()) {
return collection && collection->numRecords(txn) != 0U;
}
@@ -524,11 +524,11 @@ std::string runQuery(OperationContext* txn,
std::unique_ptr<PlanExecutor> exec = uassertStatusOK(
getExecutorFind(txn, collection, nss, std::move(cq), PlanExecutor::YIELD_AUTO));
- const LiteParsedQuery& pq = exec->getCanonicalQuery()->getParsed();
+ const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
// If it's actually an explain, do the explain and return rather than falling through
// to the normal query execution loop.
- if (pq.isExplain()) {
+ if (qr.isExplain()) {
BufBuilder bb;
bb.skip(sizeof(QueryResult::Value));
@@ -554,16 +554,16 @@ std::string runQuery(OperationContext* txn,
}
// Handle query option $maxTimeMS (not used with commands).
- if (pq.getMaxTimeMS() > 0) {
+ if (qr.getMaxTimeMS() > 0) {
uassert(40116,
"Illegal attempt to set operation deadline within DBDirectClient",
!txn->getClient()->isInDirectClient());
- txn->setDeadlineAfterNowBy(Milliseconds{pq.getMaxTimeMS()});
+ txn->setDeadlineAfterNowBy(Milliseconds{qr.getMaxTimeMS()});
}
txn->checkForInterrupt(); // May trigger maxTimeAlwaysTimeOut fail point.
// uassert if we are not on a primary, and not a secondary with SlaveOk query parameter set.
- bool slaveOK = pq.isSlaveOk() || pq.hasReadPref();
+ bool slaveOK = qr.isSlaveOk() || qr.hasReadPref();
Status serveReadsStatus =
repl::getGlobalReplicationCoordinator()->checkCanServeReadsFor(txn, nss, slaveOK);
uassertStatusOK(serveReadsStatus);
@@ -604,16 +604,16 @@ std::string runQuery(OperationContext* txn,
++numResults;
// Possibly note slave's position in the oplog.
- if (pq.isOplogReplay()) {
+ if (qr.isOplogReplay()) {
BSONElement e = obj["ts"];
if (Date == e.type() || bsonTimestamp == e.type()) {
slaveReadTill = e.timestamp();
}
}
- if (FindCommon::enoughForFirstBatch(pq, numResults)) {
- LOG(5) << "Enough for first batch, wantMore=" << pq.wantMore()
- << " ntoreturn=" << pq.getNToReturn().value_or(0) << " numResults=" << numResults
+ if (FindCommon::enoughForFirstBatch(qr, numResults)) {
+ LOG(5) << "Enough for first batch, wantMore=" << qr.wantMore()
+ << " ntoreturn=" << qr.getNToReturn().value_or(0) << " numResults=" << numResults
<< endl;
break;
}
@@ -655,20 +655,20 @@ std::string runQuery(OperationContext* txn,
exec.release(),
nss.ns(),
txn->recoveryUnit()->isReadingFromMajorityCommittedSnapshot(),
- pq.getOptions(),
- pq.getFilter());
+ qr.getOptions(),
+ qr.getFilter());
ccId = cc->cursorid();
LOG(5) << "caching executor with cursorid " << ccId << " after returning " << numResults
<< " results" << endl;
// TODO document
- if (pq.isOplogReplay() && !slaveReadTill.isNull()) {
+ if (qr.isOplogReplay() && !slaveReadTill.isNull()) {
cc->slaveReadTill(slaveReadTill);
}
// TODO document
- if (pq.isExhaust()) {
+ if (qr.isExhaust()) {
curOp.debug().exhaust = true;
}
@@ -689,12 +689,12 @@ std::string runQuery(OperationContext* txn,
bb.decouple();
// Fill out the output buffer's header.
- QueryResult::View qr = result.header().view2ptr();
- qr.setCursorId(ccId);
- qr.setResultFlagsToOk();
- qr.msgdata().setOperation(opReply);
- qr.setStartingFrom(0);
- qr.setNReturned(numResults);
+ QueryResult::View queryResultView = result.header().view2ptr();
+ queryResultView.setCursorId(ccId);
+ queryResultView.setResultFlagsToOk();
+ queryResultView.msgdata().setOperation(opReply);
+ queryResultView.setStartingFrom(0);
+ queryResultView.setNReturned(numResults);
// curOp.debug().exhaust is set above.
return curOp.debug().exhaust ? nss.ns() : "";
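
Worth noting in the last hunk: the QueryResult::View local loses the name 'qr' because that name now conventionally refers to the QueryRequest. A condensed sketch of the two locals that would otherwise collide, with 'exec', 'result', 'ccId' and 'numResults' as in runQuery() above:

// Sketch only: the request and the reply header now use distinct names.
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();

QueryResult::View queryResultView = result.header().view2ptr();  // previously named 'qr'
queryResultView.setCursorId(ccId);
queryResultView.setResultFlagsToOk();
queryResultView.msgdata().setOperation(opReply);
queryResultView.setStartingFrom(0);
queryResultView.setNReturned(numResults);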
diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp
index d09ad8fd65f..d6864765cac 100644
--- a/src/mongo/db/query/find_common.cpp
+++ b/src/mongo/db/query/find_common.cpp
@@ -31,20 +31,20 @@
#include "mongo/db/query/find_common.h"
#include "mongo/bson/bsonobj.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/util/assert_util.h"
namespace mongo {
MONGO_FP_DECLARE(keepCursorPinnedDuringGetMore);
-bool FindCommon::enoughForFirstBatch(const LiteParsedQuery& pq, long long numDocs) {
- if (!pq.getEffectiveBatchSize()) {
+bool FindCommon::enoughForFirstBatch(const QueryRequest& qr, long long numDocs) {
+ if (!qr.getEffectiveBatchSize()) {
// We enforce a default batch size for the initial find if no batch size is specified.
- return numDocs >= LiteParsedQuery::kDefaultBatchSize;
+ return numDocs >= QueryRequest::kDefaultBatchSize;
}
- return numDocs >= pq.getEffectiveBatchSize().value();
+ return numDocs >= qr.getEffectiveBatchSize().value();
}
bool FindCommon::haveSpaceForNext(const BSONObj& nextDoc, long long numDocs, int bytesBuffered) {
@@ -64,7 +64,7 @@ BSONObj FindCommon::transformSortSpec(const BSONObj& sortSpec) {
for (BSONElement elt : sortSpec) {
if (elt.isNumber()) {
comparatorBob.append(elt);
- } else if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ } else if (QueryRequest::isTextScoreMeta(elt)) {
// Sort text score decreasing by default. Field name doesn't matter but we choose
// something that a user shouldn't ever have.
comparatorBob.append("$metaTextScore", -1);
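
At its call site in runQuery() above, the renamed helper decides when the first batch is full. A sketch, assuming 'qr' and 'numResults' come from the executing find; when the request carries no effective batch size, QueryRequest::kDefaultBatchSize caps the first batch:

// Sketch only: mirrors the loop in find.cpp above. With no batchSize/limit/ntoreturn,
// the default first-batch cutoff applies; otherwise the request's effective batch size wins.
if (FindCommon::enoughForFirstBatch(qr, numResults)) {
    break;  // Stop filling the first batch; later results flow through getMore.
}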
diff --git a/src/mongo/db/query/find_common.h b/src/mongo/db/query/find_common.h
index 93483949ee7..2ae9fa2ed8f 100644
--- a/src/mongo/db/query/find_common.h
+++ b/src/mongo/db/query/find_common.h
@@ -32,7 +32,7 @@
namespace mongo {
class BSONObj;
-class LiteParsedQuery;
+class QueryRequest;
// Enabling this fail point will cause the getMore command to busy wait after pinning the cursor,
// until the fail point is disabled.
@@ -58,9 +58,9 @@ public:
/**
* Returns true if the batchSize for the initial find has been satisfied.
*
- * If 'pq' does not have a batchSize, the default batchSize is respected.
+ * If 'qr' does not have a batchSize, the default batchSize is respected.
*/
- static bool enoughForFirstBatch(const LiteParsedQuery& pq, long long numDocs);
+ static bool enoughForFirstBatch(const QueryRequest& qr, long long numDocs);
/**
* Returns true if the batchSize for the getMore has been satisfied.
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 197ea175e14..22efcb696de 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -279,8 +279,8 @@ Status prepareExecution(OperationContext* opCtx,
*rootOut = new SortKeyGeneratorStage(opCtx,
*rootOut,
ws,
- canonicalQuery->getParsed().getSort(),
- canonicalQuery->getParsed().getFilter(),
+ canonicalQuery->getQueryRequest().getSort(),
+ canonicalQuery->getQueryRequest().getFilter(),
canonicalQuery->getCollator());
}
@@ -301,7 +301,7 @@ Status prepareExecution(OperationContext* opCtx,
}
// Tailable: If the query requests tailable the collection must be capped.
- if (canonicalQuery->getParsed().isTailable()) {
+ if (canonicalQuery->getQueryRequest().isTailable()) {
if (!collection->isCapped()) {
return Status(ErrorCodes::BadValue,
"error processing query: " + canonicalQuery->toString() +
@@ -560,7 +560,7 @@ StatusWith<unique_ptr<PlanExecutor>> getOplogStartHack(OperationContext* txn,
params.collection = collection;
params.start = *startLoc;
params.direction = CollectionScanParams::FORWARD;
- params.tailable = cq->getParsed().isTailable();
+ params.tailable = cq->getQueryRequest().isTailable();
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
unique_ptr<CollectionScan> cs = make_unique<CollectionScan>(txn, params, ws.get(), cq->root());
@@ -576,7 +576,7 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorFind(OperationContext* txn,
const NamespaceString& nss,
unique_ptr<CanonicalQuery> canonicalQuery,
PlanExecutor::YieldPolicy yieldPolicy) {
- if (NULL != collection && canonicalQuery->getParsed().isOplogReplay()) {
+ if (NULL != collection && canonicalQuery->getQueryRequest().isOplogReplay()) {
return getOplogStartHack(txn, collection, std::move(canonicalQuery));
}
@@ -940,13 +940,13 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorGroup(OperationContext* txn,
}
const NamespaceString nss(request.ns);
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(request.query);
- lpq->setExplain(request.explain);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(request.query);
+ qr->setExplain(request.explain);
const ExtensionsCallbackReal extensionsCallback(txn, &nss);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1163,15 +1163,15 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorCount(OperationContext* txn,
PlanExecutor::YieldPolicy yieldPolicy) {
unique_ptr<WorkingSet> ws = make_unique<WorkingSet>();
- auto lpq = stdx::make_unique<LiteParsedQuery>(request.getNs());
- lpq->setFilter(request.getQuery());
- lpq->setCollation(request.getCollation());
- lpq->setHint(request.getHint());
- lpq->setExplain(explain);
+ auto qr = stdx::make_unique<QueryRequest>(request.getNs());
+ qr->setFilter(request.getQuery());
+ qr->setCollation(request.getCollation());
+ qr->setHint(request.getHint());
+ qr->setExplain(explain);
auto cq = CanonicalQuery::canonicalize(
txn,
- std::move(lpq),
+ std::move(qr),
collection
? static_cast<const ExtensionsCallback&>(ExtensionsCallbackReal(txn, &collection->ns()))
: static_cast<const ExtensionsCallback&>(ExtensionsCallbackNoop()));
@@ -1338,11 +1338,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
// If there are no suitable indices for the distinct hack bail out now into regular planning
// with no projection.
if (plannerParams.indices.empty()) {
- auto lpq = stdx::make_unique<LiteParsedQuery>(collection->ns());
- lpq->setFilter(query);
- lpq->setExplain(isExplain);
+ auto qr = stdx::make_unique<QueryRequest>(collection->ns());
+ qr->setFilter(query);
+ qr->setExplain(isExplain);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1359,12 +1359,12 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
// (ie _id:1 being implied by default).
BSONObj projection = getDistinctProjection(field);
- auto lpq = stdx::make_unique<LiteParsedQuery>(collection->ns());
- lpq->setFilter(query);
- lpq->setExplain(isExplain);
- lpq->setProj(projection);
+ auto qr = stdx::make_unique<QueryRequest>(collection->ns());
+ qr->setFilter(query);
+ qr->setExplain(isExplain);
+ qr->setProj(projection);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), extensionsCallback);
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -1450,12 +1450,11 @@ StatusWith<unique_ptr<PlanExecutor>> getExecutorDistinct(OperationContext* txn,
}
// We drop the projection from the 'cq'. Unfortunately this is not trivial.
- auto lpqNoProjection = stdx::make_unique<LiteParsedQuery>(collection->ns());
- lpqNoProjection->setFilter(query);
- lpqNoProjection->setExplain(isExplain);
+ auto qrNoProjection = stdx::make_unique<QueryRequest>(collection->ns());
+ qrNoProjection->setFilter(query);
+ qrNoProjection->setExplain(isExplain);
- statusWithCQ =
- CanonicalQuery::canonicalize(txn, std::move(lpqNoProjection), extensionsCallback);
+ statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qrNoProjection), extensionsCallback);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
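
The pattern repeated throughout get_executor.cpp: internal read paths (group, count, distinct) now build a QueryRequest directly and hand it to CanonicalQuery::canonicalize(). A condensed sketch, assuming 'request' carries the command's parsed namespace, filter and explain flag, and 'txn' is the current OperationContext:

// Sketch only: condenses the group/count/distinct pattern shown above.
const NamespaceString nss(request.ns);
auto qr = stdx::make_unique<QueryRequest>(nss);
qr->setFilter(request.query);
qr->setExplain(request.explain);

const ExtensionsCallbackReal extensionsCallback(txn, &nss);
auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), extensionsCallback);
if (!statusWithCQ.isOK()) {
    return statusWithCQ.getStatus();
}
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());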
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp
index bc8d6c40669..82e8e865e25 100644
--- a/src/mongo/db/query/get_executor_test.cpp
+++ b/src/mongo/db/query/get_executor_test.cpp
@@ -56,12 +56,12 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
diff --git a/src/mongo/db/query/getmore_request.cpp b/src/mongo/db/query/getmore_request.cpp
index b1a97f32c5e..6244d77ff56 100644
--- a/src/mongo/db/query/getmore_request.cpp
+++ b/src/mongo/db/query/getmore_request.cpp
@@ -136,7 +136,7 @@ StatusWith<GetMoreRequest> GetMoreRequest::parseFromBSON(const std::string& dbna
batchSize = el.numberLong();
} else if (str::equals(fieldName, kAwaitDataTimeoutField)) {
- auto maxAwaitDataTime = LiteParsedQuery::parseMaxTimeMS(el);
+ auto maxAwaitDataTime = QueryRequest::parseMaxTimeMS(el);
if (!maxAwaitDataTime.isOK()) {
return maxAwaitDataTime.getStatus();
}
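
The same rename reaches the getMore parser: timeout fields are parsed through QueryRequest::parseMaxTimeMS(). A sketch of that step, assuming 'el' is the BSONElement being parsed and that the helper yields the value in milliseconds, as find.cpp above does with getMaxTimeMS():

// Sketch only: propagate a parse failure, otherwise convert to a duration.
auto maxAwaitDataTime = QueryRequest::parseMaxTimeMS(el);
if (!maxAwaitDataTime.isOK()) {
    return maxAwaitDataTime.getStatus();
}
Milliseconds awaitDataTimeout{maxAwaitDataTime.getValue()};  // variable name is illustrative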
diff --git a/src/mongo/db/query/lite_parsed_query_test.cpp b/src/mongo/db/query/lite_parsed_query_test.cpp
deleted file mode 100644
index 17affe6f9c3..00000000000
--- a/src/mongo/db/query/lite_parsed_query_test.cpp
+++ /dev/null
@@ -1,1097 +0,0 @@
-/**
- * Copyright (C) 2013 MongoDB Inc.
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License, version 3,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Affero General Public License for more details.
- *
- * You should have received a copy of the GNU Affero General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * As a special exception, the copyright holders give permission to link the
- * code of portions of this program with the OpenSSL library under certain
- * conditions as described in each individual source file and distribute
- * linked combinations including the program with the OpenSSL library. You
- * must comply with the GNU Affero General Public License in all respects for
- * all of the code used other than as permitted herein. If you modify file(s)
- * with this exception, you may extend this exception to your version of the
- * file(s), but you are not obligated to do so. If you do not wish to do so,
- * delete this exception statement from your version. If you delete this
- * exception statement from all source files in the program, then also delete
- * it in the license file.
- */
-
-#include "mongo/platform/basic.h"
-
-#include <boost/optional.hpp>
-#include <boost/optional/optional_io.hpp>
-
-#include "mongo/db/json.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/lite_parsed_query.h"
-#include "mongo/unittest/unittest.h"
-
-namespace mongo {
-namespace {
-
-using std::unique_ptr;
-using unittest::assertGet;
-
-static const NamespaceString testns("testdb.testcoll");
-
-TEST(LiteParsedQueryTest, LimitWithNToReturn) {
- LiteParsedQuery lpq(testns);
- lpq.setLimit(0);
- lpq.setNToReturn(0);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, BatchSizeWithNToReturn) {
- LiteParsedQuery lpq(testns);
- lpq.setBatchSize(0);
- lpq.setNToReturn(0);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, NegativeSkip) {
- LiteParsedQuery lpq(testns);
- lpq.setSkip(-1);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ZeroSkip) {
- LiteParsedQuery lpq(testns);
- lpq.setSkip(0);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, PositiveSkip) {
- LiteParsedQuery lpq(testns);
- lpq.setSkip(1);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, NegativeLimit) {
- LiteParsedQuery lpq(testns);
- lpq.setLimit(-1);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ZeroLimit) {
- LiteParsedQuery lpq(testns);
- lpq.setLimit(0);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, PositiveLimit) {
- LiteParsedQuery lpq(testns);
- lpq.setLimit(1);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, NegativeBatchSize) {
- LiteParsedQuery lpq(testns);
- lpq.setBatchSize(-1);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ZeroBatchSize) {
- LiteParsedQuery lpq(testns);
- lpq.setBatchSize(0);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, PositiveBatchSize) {
- LiteParsedQuery lpq(testns);
- lpq.setBatchSize(1);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, NegativeNToReturn) {
- LiteParsedQuery lpq(testns);
- lpq.setNToReturn(-1);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ZeroNToReturn) {
- LiteParsedQuery lpq(testns);
- lpq.setNToReturn(0);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, PositiveNToReturn) {
- LiteParsedQuery lpq(testns);
- lpq.setNToReturn(1);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, NegativeMaxScan) {
- LiteParsedQuery lpq(testns);
- lpq.setMaxScan(-1);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ZeroMaxScan) {
- LiteParsedQuery lpq(testns);
- lpq.setMaxScan(0);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, PositiveMaxScan) {
- LiteParsedQuery lpq(testns);
- lpq.setMaxScan(1);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, NegativeMaxTimeMS) {
- LiteParsedQuery lpq(testns);
- lpq.setMaxTimeMS(-1);
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ZeroMaxTimeMS) {
- LiteParsedQuery lpq(testns);
- lpq.setMaxTimeMS(0);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, PositiveMaxTimeMS) {
- LiteParsedQuery lpq(testns);
- lpq.setMaxTimeMS(1);
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ValidSortOrder) {
- LiteParsedQuery lpq(testns);
- lpq.setSort(fromjson("{a: 1}"));
- ASSERT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, InvalidSortOrderString) {
- LiteParsedQuery lpq(testns);
- lpq.setSort(fromjson("{a: \"\"}"));
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, MinFieldsNotPrefixOfMax) {
- LiteParsedQuery lpq(testns);
- lpq.setMin(fromjson("{a: 1}"));
- lpq.setMax(fromjson("{b: 1}"));
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, MinFieldsMoreThanMax) {
- LiteParsedQuery lpq(testns);
- lpq.setMin(fromjson("{a: 1, b: 1}"));
- lpq.setMax(fromjson("{a: 1}"));
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, MinFieldsLessThanMax) {
- LiteParsedQuery lpq(testns);
- lpq.setMin(fromjson("{a: 1}"));
- lpq.setMax(fromjson("{a: 1, b: 1}"));
- ASSERT_NOT_OK(lpq.validate());
-}
-
-TEST(LiteParsedQueryTest, ForbidTailableWithNonNaturalSort) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "tailable: true,"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ForbidTailableWithSingleBatch) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "tailable: true,"
- "singleBatch: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, AllowTailableWithNaturalSort) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "tailable: true,"
- "sort: {$natural: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
- ASSERT_TRUE(result.getValue()->isTailable());
- ASSERT_EQ(result.getValue()->getSort(), BSON("$natural" << 1));
-}
-
-TEST(LiteParsedQueryTest, IsIsolatedReturnsTrueWithIsolated) {
- ASSERT_TRUE(LiteParsedQuery::isQueryIsolated(BSON("$isolated" << 1)));
-}
-
-TEST(LiteParsedQueryTest, IsIsolatedReturnsTrueWithAtomic) {
- ASSERT_TRUE(LiteParsedQuery::isQueryIsolated(BSON("$atomic" << 1)));
-}
-
-TEST(LiteParsedQueryTest, IsIsolatedReturnsFalseWithIsolated) {
- ASSERT_FALSE(LiteParsedQuery::isQueryIsolated(BSON("$isolated" << false)));
-}
-
-TEST(LiteParsedQueryTest, IsIsolatedReturnsFalseWithAtomic) {
- ASSERT_FALSE(LiteParsedQuery::isQueryIsolated(BSON("$atomic" << false)));
-}
-
-//
-// Test compatibility of various projection and sort objects.
-//
-
-TEST(LiteParsedQueryTest, ValidSortProj) {
- LiteParsedQuery lpq(testns);
- lpq.setProj(fromjson("{a: 1}"));
- lpq.setSort(fromjson("{a: 1}"));
- ASSERT_OK(lpq.validate());
-
- LiteParsedQuery metaLPQ(testns);
- metaLPQ.setProj(fromjson("{a: {$meta: \"textScore\"}}"));
- metaLPQ.setSort(fromjson("{a: {$meta: \"textScore\"}}"));
- ASSERT_OK(metaLPQ.validate());
-}
-
-TEST(LiteParsedQueryTest, ForbidNonMetaSortOnFieldWithMetaProject) {
- LiteParsedQuery badLPQ(testns);
- badLPQ.setProj(fromjson("{a: {$meta: \"textScore\"}}"));
- badLPQ.setSort(fromjson("{a: 1}"));
- ASSERT_NOT_OK(badLPQ.validate());
-
- LiteParsedQuery goodLPQ(testns);
- goodLPQ.setProj(fromjson("{a: {$meta: \"textScore\"}}"));
- goodLPQ.setSort(fromjson("{b: 1}"));
- ASSERT_OK(goodLPQ.validate());
-}
-
-TEST(LiteParsedQueryTest, ForbidMetaSortOnFieldWithoutMetaProject) {
- LiteParsedQuery lpqMatching(testns);
- lpqMatching.setProj(fromjson("{a: 1}"));
- lpqMatching.setSort(fromjson("{a: {$meta: \"textScore\"}}"));
- ASSERT_NOT_OK(lpqMatching.validate());
-
- LiteParsedQuery lpqNonMatching(testns);
- lpqNonMatching.setProj(fromjson("{b: 1}"));
- lpqNonMatching.setSort(fromjson("{a: {$meta: \"textScore\"}}"));
- ASSERT_NOT_OK(lpqNonMatching.validate());
-}
-
-//
-// Text meta BSON element validation
-//
-
-bool isFirstElementTextScoreMeta(const char* sortStr) {
- BSONObj sortObj = fromjson(sortStr);
- BSONElement elt = sortObj.firstElement();
- bool result = LiteParsedQuery::isTextScoreMeta(elt);
- return result;
-}
-
-// Check validation of $meta expressions
-TEST(LiteParsedQueryTest, IsTextScoreMeta) {
- // Valid textScore meta sort
- ASSERT(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\"}}"));
-
- // Invalid textScore meta sorts
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: 1}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"image\"}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$world: \"textScore\"}}"));
- ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\", b: 1}}"));
-}
-
-//
-// Sort order validation
-// In a valid sort order, each element satisfies one of:
-// 1. a number with value 1
-// 2. a number with value -1
-// 3. isTextScoreMeta
-//
-
-TEST(LiteParsedQueryTest, ValidateSortOrder) {
- // Valid sorts
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: 1}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: -1}")));
- ASSERT(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\"}}")));
-
- // Invalid sorts
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 100}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: 0}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -100}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: Infinity}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: -Infinity}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: true}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: false}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: null}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {b: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: []}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: [1, 2, 3]}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"\"}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: \"bb\"}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
- ASSERT_FALSE(
- LiteParsedQuery::isValidSortOrder(fromjson("{a: {$meta: \"textScore\","
- " b: 1}}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': 1}")));
- ASSERT_FALSE(LiteParsedQuery::isValidSortOrder(fromjson("{'': -1}")));
-}
-
-//
-// Tests for parsing a lite parsed query from a command BSON object.
-//
-
-TEST(LiteParsedQueryTest, ParseFromCommandBasic) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 3},"
- "sort: {a: 1},"
- "projection: {_id: 0, a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandWithOptions) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 3},"
- "sort: {a: 1},"
- "projection: {_id: 0, a: 1},"
- "showRecordId: true,"
- "maxScan: 1000}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Make sure the values from the command BSON are reflected in the LPQ.
- ASSERT(lpq->showRecordId());
- ASSERT_EQUALS(1000, lpq->getMaxScan());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandHintAsString) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "hint: 'foo_1'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- BSONObj hintObj = lpq->getHint();
- ASSERT_EQUALS(BSON("$hint"
- << "foo_1"),
- hintObj);
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandValidSortProj) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "projection: {a: 1},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandValidSortProjMeta) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandAllFlagsTrue) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "tailable: true,"
- "oplogReplay: true,"
- "noCursorTimeout: true,"
- "awaitData: true,"
- "allowPartialResults: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Test that all the flags got set to true.
- ASSERT(lpq->isTailable());
- ASSERT(!lpq->isSlaveOk());
- ASSERT(lpq->isOplogReplay());
- ASSERT(lpq->isNoCursorTimeout());
- ASSERT(lpq->isAwaitData());
- ASSERT(lpq->isAllowPartialResults());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandCommentWithValidMinMax) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "comment: 'the comment',"
- "min: {a: 1},"
- "max: {a: 2}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_EQUALS("the comment", lpq->getComment());
- BSONObj expectedMin = BSON("a" << 1);
- ASSERT_EQUALS(0, expectedMin.woCompare(lpq->getMin()));
- BSONObj expectedMax = BSON("a" << 2);
- ASSERT_EQUALS(0, expectedMax.woCompare(lpq->getMax()));
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandAllNonOptionFields) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "sort: {b: 1},"
- "projection: {c: 1},"
- "hint: {d: 1},"
- "readConcern: {e: 1},"
- "collation: {f: 1},"
- "limit: 3,"
- "skip: 5,"
- "batchSize: 90,"
- "singleBatch: false}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- // Check the values inside the LPQ.
- BSONObj expectedQuery = BSON("a" << 1);
- ASSERT_EQUALS(0, expectedQuery.woCompare(lpq->getFilter()));
- BSONObj expectedSort = BSON("b" << 1);
- ASSERT_EQUALS(0, expectedSort.woCompare(lpq->getSort()));
- BSONObj expectedProj = BSON("c" << 1);
- ASSERT_EQUALS(0, expectedProj.woCompare(lpq->getProj()));
- BSONObj expectedHint = BSON("d" << 1);
- ASSERT_EQUALS(0, expectedHint.woCompare(lpq->getHint()));
- BSONObj expectedReadConcern = BSON("e" << 1);
- ASSERT_EQUALS(0, expectedReadConcern.woCompare(lpq->getReadConcern()));
- BSONObj expectedCollation = BSON("f" << 1);
- ASSERT_EQUALS(0, expectedCollation.woCompare(lpq->getCollation()));
- ASSERT_EQUALS(3, *lpq->getLimit());
- ASSERT_EQUALS(5, *lpq->getSkip());
- ASSERT_EQUALS(90, *lpq->getBatchSize());
- ASSERT(lpq->wantMore());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandLargeLimit) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "limit: 8000000000}"); // 8 * 1000 * 1000 * 1000
- const NamespaceString nss("test.testns");
- const bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *lpq->getLimit());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandLargeBatchSize) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "batchSize: 8000000000}"); // 8 * 1000 * 1000 * 1000
- const NamespaceString nss("test.testns");
- const bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *lpq->getBatchSize());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandLargeSkip) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "skip: 8000000000}"); // 8 * 1000 * 1000 * 1000
- const NamespaceString nss("test.testns");
- const bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *lpq->getSkip());
-}
-
-//
-// Parsing errors where a field has the wrong type.
-//
-
-TEST(LiteParsedQueryTest, ParseFromCommandQueryWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSortWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "sort: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandProjWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "projection: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSkipWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "skip: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandLimitWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "limit: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSingleBatchWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "singleBatch: 'false',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandCommentWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "comment: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandMaxScanWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "maxScan: true,"
- "comment: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandMaxTimeMSWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "maxTimeMS: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandMaxWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "max: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandMinWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "min: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandReturnKeyWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "returnKey: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-
-TEST(LiteParsedQueryTest, ParseFromCommandShowRecordIdWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "showRecordId: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSnapshotWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "snapshot: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandTailableWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "tailable: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSlaveOkWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "slaveOk: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandOplogReplayWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "oplogReplay: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandNoCursorTimeoutWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "noCursorTimeout: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandAwaitDataWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "tailable: true,"
- "awaitData: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandExhaustWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "exhaust: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandPartialWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "allowPartialResults: 3}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandReadConcernWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "readConcern: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandCollationWrongType) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "filter: {a: 1},"
- "collation: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-//
-// Parsing errors where a field has the right type but a bad value.
-//
-
-TEST(LiteParsedQueryTest, ParseFromCommandNegativeSkipError) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "skip: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSkipIsZero) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "skip: 0,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_EQ(BSON("a" << 3), lpq->getFilter());
- ASSERT_FALSE(lpq->getSkip());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandNegativeLimitError) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "limit: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandLimitIsZero) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "limit: 0,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_EQ(BSON("a" << 3), lpq->getFilter());
- ASSERT_FALSE(lpq->getLimit());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandNegativeBatchSizeError) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "batchSize: -10,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandBatchSizeZero) {
- BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(lpq->getBatchSize());
- ASSERT_EQ(0, *lpq->getBatchSize());
-
- ASSERT(!lpq->getLimit());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandDefaultBatchSize) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT(!lpq->getBatchSize());
- ASSERT(!lpq->getLimit());
-}
-
-//
-// Errors checked in LiteParsedQuery::validate().
-//
-
-TEST(LiteParsedQueryTest, ParseFromCommandMinMaxDifferentFieldsError) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "min: {a: 3},"
- "max: {b: 4}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusSortError) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "sort: {a: 3},"
- "snapshot: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandSnapshotPlusHintError) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "snapshot: true,"
- "hint: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandForbidNonMetaSortOnFieldWithMetaProject) {
- BSONObj cmdObj;
-
- cmdObj = fromjson(
- "{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-
- cmdObj = fromjson(
- "{find: 'testns',"
- "projection: {a: {$meta: 'textScore'}},"
- "sort: {b: 1}}");
- ASSERT_OK(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandForbidMetaSortOnFieldWithoutMetaProject) {
- BSONObj cmdObj;
-
- cmdObj = fromjson(
- "{find: 'testns',"
- "projection: {a: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-
- cmdObj = fromjson(
- "{find: 'testns',"
- "projection: {b: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandForbidExhaust) {
- BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandIsFromFindCommand) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
-
- ASSERT_FALSE(lpq->getNToReturn());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandAwaitDataButNotTailable) {
- const NamespaceString nss("test.testns");
- BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandFirstFieldNotString) {
- BSONObj cmdObj = fromjson("{find: 1}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseCommandIgnoreShardVersionField) {
- BSONObj cmdObj = fromjson("{find: 'test.testns', shardVersion: 'foo'}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, DefaultQueryParametersCorrect) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
-
- const NamespaceString nss("test.testns");
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, false)));
-
- ASSERT_FALSE(lpq->getSkip());
- ASSERT_FALSE(lpq->getLimit());
-
- ASSERT_EQUALS(true, lpq->wantMore());
- ASSERT_FALSE(lpq->getNToReturn());
- ASSERT_EQUALS(false, lpq->isExplain());
- ASSERT_EQUALS(0, lpq->getMaxScan());
- ASSERT_EQUALS(0, lpq->getMaxTimeMS());
- ASSERT_EQUALS(false, lpq->returnKey());
- ASSERT_EQUALS(false, lpq->showRecordId());
- ASSERT_EQUALS(false, lpq->isSnapshot());
- ASSERT_EQUALS(false, lpq->hasReadPref());
- ASSERT_EQUALS(false, lpq->isTailable());
- ASSERT_EQUALS(false, lpq->isSlaveOk());
- ASSERT_EQUALS(false, lpq->isOplogReplay());
- ASSERT_EQUALS(false, lpq->isNoCursorTimeout());
- ASSERT_EQUALS(false, lpq->isAwaitData());
- ASSERT_EQUALS(false, lpq->isExhaust());
- ASSERT_EQUALS(false, lpq->isAllowPartialResults());
-}
-
-//
-// Extra fields cause the parse to fail.
-//
-
-TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraField) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "snapshot: true,"
- "foo: {a: 1}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseFromCommandForbidExtraOption) {
- BSONObj cmdObj = fromjson(
- "{find: 'testns',"
- "snapshot: true,"
- "foo: true}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- auto result = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
-}
-
-TEST(LiteParsedQueryTest, ParseMaxTimeMSStringValueFails) {
- BSONObj maxTimeObj = BSON(LiteParsedQuery::cmdOptionMaxTimeMS << "foo");
- ASSERT_NOT_OK(LiteParsedQuery::parseMaxTimeMS(maxTimeObj[LiteParsedQuery::cmdOptionMaxTimeMS]));
-}
-
-TEST(LiteParsedQueryTest, ParseMaxTimeMSNonIntegralValueFails) {
- BSONObj maxTimeObj = BSON(LiteParsedQuery::cmdOptionMaxTimeMS << 100.3);
- ASSERT_NOT_OK(LiteParsedQuery::parseMaxTimeMS(maxTimeObj[LiteParsedQuery::cmdOptionMaxTimeMS]));
-}
-
-TEST(LiteParsedQueryTest, ParseMaxTimeMSOutOfRangeDoubleFails) {
- BSONObj maxTimeObj = BSON(LiteParsedQuery::cmdOptionMaxTimeMS << 1e200);
- ASSERT_NOT_OK(LiteParsedQuery::parseMaxTimeMS(maxTimeObj[LiteParsedQuery::cmdOptionMaxTimeMS]));
-}
-
-TEST(LiteParsedQueryTest, ParseMaxTimeMSNegativeValueFails) {
- BSONObj maxTimeObj = BSON(LiteParsedQuery::cmdOptionMaxTimeMS << -400);
- ASSERT_NOT_OK(LiteParsedQuery::parseMaxTimeMS(maxTimeObj[LiteParsedQuery::cmdOptionMaxTimeMS]));
-}
-
-TEST(LiteParsedQueryTest, ParseMaxTimeMSZeroSucceeds) {
- BSONObj maxTimeObj = BSON(LiteParsedQuery::cmdOptionMaxTimeMS << 0);
- auto maxTime = LiteParsedQuery::parseMaxTimeMS(maxTimeObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
- ASSERT_OK(maxTime);
- ASSERT_EQ(maxTime.getValue(), 0);
-}
-
-TEST(LiteParsedQueryTest, ParseMaxTimeMSPositiveInRangeSucceeds) {
- BSONObj maxTimeObj = BSON(LiteParsedQuery::cmdOptionMaxTimeMS << 300);
- auto maxTime = LiteParsedQuery::parseMaxTimeMS(maxTimeObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
- ASSERT_OK(maxTime);
- ASSERT_EQ(maxTime.getValue(), 300);
-}
-
-} // namespace mongo
-} // namespace
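For reference, a minimal sketch of how one of the removed parse checks reads once it targets the renamed class; the suite name QueryRequestTest is illustrative, and the makeFromFindCommand signature is the one introduced by the rename later in this diff.

// Sketch only: negative skip values are still rejected after the rename.
TEST(QueryRequestTest, ParseFromCommandNegativeSkipRejected) {
    BSONObj cmdObj = fromjson(
        "{find: 'testns',"
        "skip: -3,"
        "filter: {a: 3}}");
    const NamespaceString nss("test.testns");
    const bool isExplain = false;
    auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
    ASSERT_NOT_OK(result.getStatus());
}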
diff --git a/src/mongo/db/query/parsed_projection.cpp b/src/mongo/db/query/parsed_projection.cpp
index f3ebf7736b0..4baffb815d4 100644
--- a/src/mongo/db/query/parsed_projection.cpp
+++ b/src/mongo/db/query/parsed_projection.cpp
@@ -28,7 +28,7 @@
#include "mongo/db/query/parsed_projection.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
namespace mongo {
@@ -149,28 +149,28 @@ Status ParsedProjection::make(const BSONObj& spec,
return Status(ErrorCodes::BadValue, "unexpected argument to $meta in proj");
}
- if (e2.valuestr() != LiteParsedQuery::metaTextScore &&
- e2.valuestr() != LiteParsedQuery::metaRecordId &&
- e2.valuestr() != LiteParsedQuery::metaIndexKey &&
- e2.valuestr() != LiteParsedQuery::metaGeoNearDistance &&
- e2.valuestr() != LiteParsedQuery::metaGeoNearPoint &&
- e2.valuestr() != LiteParsedQuery::metaSortKey) {
+ if (e2.valuestr() != QueryRequest::metaTextScore &&
+ e2.valuestr() != QueryRequest::metaRecordId &&
+ e2.valuestr() != QueryRequest::metaIndexKey &&
+ e2.valuestr() != QueryRequest::metaGeoNearDistance &&
+ e2.valuestr() != QueryRequest::metaGeoNearPoint &&
+ e2.valuestr() != QueryRequest::metaSortKey) {
return Status(ErrorCodes::BadValue, "unsupported $meta operator: " + e2.str());
}
// This clobbers everything else.
- if (e2.valuestr() == LiteParsedQuery::metaIndexKey) {
+ if (e2.valuestr() == QueryRequest::metaIndexKey) {
hasIndexKeyProjection = true;
- } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearDistance) {
+ } else if (e2.valuestr() == QueryRequest::metaGeoNearDistance) {
wantGeoNearDistance = true;
- } else if (e2.valuestr() == LiteParsedQuery::metaGeoNearPoint) {
+ } else if (e2.valuestr() == QueryRequest::metaGeoNearPoint) {
wantGeoNearPoint = true;
- } else if (e2.valuestr() == LiteParsedQuery::metaSortKey) {
+ } else if (e2.valuestr() == QueryRequest::metaSortKey) {
wantSortKey = true;
}
// Of the $meta projections, only sortKey can be covered.
- if (e2.valuestr() != LiteParsedQuery::metaSortKey) {
+ if (e2.valuestr() != QueryRequest::metaSortKey) {
requiresDocument = true;
}
pp->_metaFields.push_back(elem.fieldNameStringData());
@@ -255,7 +255,7 @@ Status ParsedProjection::make(const BSONObj& spec,
// expression to know which array element was matched.
pp->_requiresMatchDetails = arrayOpType == ARRAY_OP_POSITIONAL;
- // Save the raw spec. It should be owned by the LiteParsedQuery.
+ // Save the raw spec. It should be owned by the QueryRequest.
verify(spec.isOwned());
pp->_source = spec;
pp->_returnKey = hasIndexKeyProjection;
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index ca1f93d9d6f..8268555215d 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -250,30 +250,30 @@ void encodeGeoNearMatchExpression(const GeoNearMatchExpression* tree, StringBuil
//
bool PlanCache::shouldCacheQuery(const CanonicalQuery& query) {
- const LiteParsedQuery& lpq = query.getParsed();
+ const QueryRequest& qr = query.getQueryRequest();
const MatchExpression* expr = query.root();
// Collection scan
// No sort order requested
- if (lpq.getSort().isEmpty() && expr->matchType() == MatchExpression::AND &&
+ if (qr.getSort().isEmpty() && expr->matchType() == MatchExpression::AND &&
expr->numChildren() == 0) {
return false;
}
// Hint provided
- if (!lpq.getHint().isEmpty()) {
+ if (!qr.getHint().isEmpty()) {
return false;
}
// Min provided
// Min queries are a special case of hinted queries.
- if (!lpq.getMin().isEmpty()) {
+ if (!qr.getMin().isEmpty()) {
return false;
}
// Max provided
// Similar to min, max queries are a special case of hinted queries.
- if (!lpq.getMax().isEmpty()) {
+ if (!qr.getMax().isEmpty()) {
return false;
}
@@ -282,17 +282,17 @@ bool PlanCache::shouldCacheQuery(const CanonicalQuery& query) {
// If the explain report is generated by the cached plan runner using
// stale information from the cache for the losing plans, allPlans would
// simply be wrong.
- if (lpq.isExplain()) {
+ if (qr.isExplain()) {
return false;
}
// Tailable cursors won't get cached, just turn into collscans.
- if (query.getParsed().isTailable()) {
+ if (query.getQueryRequest().isTailable()) {
return false;
}
// Snapshot is really a hint.
- if (query.getParsed().isSnapshot()) {
+ if (query.getQueryRequest().isSnapshot()) {
return false;
}
@@ -531,7 +531,7 @@ void PlanCache::encodeKeyForMatch(const MatchExpression* tree, StringBuilder* ke
/**
* Encodes sort order into cache key.
* Sort order is normalized because it is provided by
- * LiteParsedQuery.
+ * QueryRequest.
*/
void PlanCache::encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuilder) const {
if (sortObj.isEmpty()) {
@@ -544,7 +544,7 @@ void PlanCache::encodeKeyForSort(const BSONObj& sortObj, StringBuilder* keyBuild
while (it.more()) {
BSONElement elt = it.next();
// $meta text score
- if (LiteParsedQuery::isTextScoreMeta(elt)) {
+ if (QueryRequest::isTextScoreMeta(elt)) {
*keyBuilder << "t";
}
// Ascending
@@ -636,14 +636,14 @@ Status PlanCache::add(const CanonicalQuery& query,
}
PlanCacheEntry* entry = new PlanCacheEntry(solns, why);
- const LiteParsedQuery& pq = query.getParsed();
- entry->query = pq.getFilter().getOwned();
- entry->sort = pq.getSort().getOwned();
+ const QueryRequest& qr = query.getQueryRequest();
+ entry->query = qr.getFilter().getOwned();
+ entry->sort = qr.getSort().getOwned();
// Strip projections on $-prefixed fields, as these are added by internal callers of the query
// system and are not considered part of the user projection.
BSONObjBuilder projBuilder;
- for (auto elem : pq.getProj()) {
+ for (auto elem : qr.getProj()) {
if (elem.fieldName()[0] == '$') {
continue;
}
@@ -716,8 +716,8 @@ void PlanCache::clear() {
PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
StringBuilder keyBuilder;
encodeKeyForMatch(cq.root(), &keyBuilder);
- encodeKeyForSort(cq.getParsed().getSort(), &keyBuilder);
- encodeKeyForProj(cq.getParsed().getProj(), &keyBuilder);
+ encodeKeyForSort(cq.getQueryRequest().getSort(), &keyBuilder);
+ encodeKeyForProj(cq.getQueryRequest().getProj(), &keyBuilder);
return keyBuilder.str();
}
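A hedged sketch of how the hint check in shouldCacheQuery() above can be exercised, following the canonicalization pattern used by the test helpers in the next file; constructing a PlanCache with its default constructor is an assumption here.

// Sketch, mirroring the canonicalize() helpers in the next file: a hinted query is not cached.
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
auto qr = stdx::make_unique<QueryRequest>(NamespaceString("test.collection"));
qr->setFilter(BSON("a" << 1));
qr->setHint(BSON("a" << 1));  // non-empty hint: shouldCacheQuery() above returns false
auto statusWithCQ = CanonicalQuery::canonicalize(
    txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
invariant(statusWithCQ.isOK());
PlanCache planCache;  // default construction assumed
bool cacheable = planCache.shouldCacheQuery(*statusWithCQ.getValue());  // expected: false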
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 708ff2b69e6..23eef65734b 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -66,10 +66,10 @@ unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(queryObj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(queryObj);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -85,12 +85,12 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -106,21 +106,21 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projStr));
if (skip) {
- lpq->setSkip(skip);
+ qr->setSkip(skip);
}
if (limit) {
- lpq->setLimit(limit);
+ qr->setLimit(limit);
}
- lpq->setHint(fromjson(hintStr));
- lpq->setMin(fromjson(minStr));
- lpq->setMax(fromjson(maxStr));
+ qr->setHint(fromjson(hintStr));
+ qr->setMin(fromjson(minStr));
+ qr->setMax(fromjson(maxStr));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -138,23 +138,23 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson(queryStr));
- lpq->setSort(fromjson(sortStr));
- lpq->setProj(fromjson(projStr));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson(queryStr));
+ qr->setSort(fromjson(sortStr));
+ qr->setProj(fromjson(projStr));
if (skip) {
- lpq->setSkip(skip);
+ qr->setSkip(skip);
}
if (limit) {
- lpq->setLimit(limit);
+ qr->setLimit(limit);
}
- lpq->setHint(fromjson(hintStr));
- lpq->setMin(fromjson(minStr));
- lpq->setMax(fromjson(maxStr));
- lpq->setSnapshot(snapshot);
- lpq->setExplain(explain);
+ qr->setHint(fromjson(hintStr));
+ qr->setMin(fromjson(minStr));
+ qr->setMax(fromjson(maxStr));
+ qr->setSnapshot(snapshot);
+ qr->setExplain(explain);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
@@ -386,8 +386,8 @@ TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
false, // snapshot
true // explain
));
- const LiteParsedQuery& pq = cq->getParsed();
- ASSERT_TRUE(pq.isExplain());
+ const QueryRequest& qr = cq->getQueryRequest();
+ ASSERT_TRUE(qr.isExplain());
assertShouldNotCacheQuery(*cq);
}
@@ -538,22 +538,22 @@ protected:
solns.clear();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
- lpq->setSort(sort);
- lpq->setProj(proj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
+ qr->setSort(sort);
+ qr->setProj(proj);
if (skip) {
- lpq->setSkip(skip);
+ qr->setSkip(skip);
}
if (limit) {
- lpq->setLimit(limit);
+ qr->setLimit(limit);
}
- lpq->setHint(hint);
- lpq->setMin(minObj);
- lpq->setMax(maxObj);
- lpq->setSnapshot(snapshot);
+ qr->setHint(hint);
+ qr->setMin(minObj);
+ qr->setMax(maxObj);
+ qr->setSnapshot(snapshot);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
Status s = QueryPlanner::plan(*statusWithCQ.getValue(), params, &solns);
ASSERT_OK(s);
@@ -627,12 +627,12 @@ protected:
QueryTestServiceContext serviceContext;
auto txn = serviceContext.makeOperationContext();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
- lpq->setSort(sort);
- lpq->setProj(proj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
+ qr->setSort(sort);
+ qr->setProj(proj);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn.get(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn.get(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp
index 4a121926b9e..6a74f988ad7 100644
--- a/src/mongo/db/query/plan_executor.cpp
+++ b/src/mongo/db/query/plan_executor.cpp
@@ -177,7 +177,7 @@ PlanExecutor::PlanExecutor(OperationContext* opCtx,
_ns = _collection->ns().ns();
} else {
invariant(NULL != _cq.get());
- _ns = _cq->getParsed().ns();
+ _ns = _cq->getQueryRequest().ns();
}
}
diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp
index 349a25b99f7..e4e9c6ec416 100644
--- a/src/mongo/db/query/planner_access.cpp
+++ b/src/mongo/db/query/planner_access.cpp
@@ -124,11 +124,12 @@ QuerySolutionNode* QueryPlannerAccess::makeCollectionScan(const CanonicalQuery&
csn->name = query.ns();
csn->filter = query.root()->shallowClone();
csn->tailable = tailable;
- csn->maxScan = query.getParsed().getMaxScan();
+ csn->maxScan = query.getQueryRequest().getMaxScan();
// If the hint is {$natural: +-1} this changes the direction of the collection scan.
- if (!query.getParsed().getHint().isEmpty()) {
- BSONElement natural = dps::extractElementAtPath(query.getParsed().getHint(), "$natural");
+ if (!query.getQueryRequest().getHint().isEmpty()) {
+ BSONElement natural =
+ dps::extractElementAtPath(query.getQueryRequest().getHint(), "$natural");
if (!natural.eoo()) {
csn->direction = natural.numberInt() >= 0 ? 1 : -1;
}
@@ -136,7 +137,7 @@ QuerySolutionNode* QueryPlannerAccess::makeCollectionScan(const CanonicalQuery&
// The sort can specify $natural as well. The sort direction should override the hint
// direction if both are specified.
- const BSONObj& sortObj = query.getParsed().getSort();
+ const BSONObj& sortObj = query.getQueryRequest().getSort();
if (!sortObj.isEmpty()) {
BSONElement natural = dps::extractElementAtPath(sortObj, "$natural");
if (!natural.eoo()) {
@@ -209,8 +210,8 @@ QuerySolutionNode* QueryPlannerAccess::makeLeafNode(
isn->indexKeyPattern = index.keyPattern;
isn->indexIsMultiKey = index.multikey;
isn->bounds.fields.resize(index.keyPattern.nFields());
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
+ isn->maxScan = query.getQueryRequest().getMaxScan();
+ isn->addKeyMetadata = query.getQueryRequest().returnKey();
// Get the ixtag->pos-th element of the index key pattern.
// TODO: cache this instead/with ixtag->pos?
@@ -984,7 +985,7 @@ QuerySolutionNode* QueryPlannerAccess::buildIndexedAnd(const CanonicalQuery& que
for (size_t i = 0; i < ahn->children.size(); ++i) {
ahn->children[i]->computeProperties();
const BSONObjSet& sorts = ahn->children[i]->getSort();
- if (sorts.end() != sorts.find(query.getParsed().getSort())) {
+ if (sorts.end() != sorts.find(query.getQueryRequest().getSort())) {
std::swap(ahn->children[i], ahn->children.back());
break;
}
@@ -1085,8 +1086,8 @@ QuerySolutionNode* QueryPlannerAccess::buildIndexedOr(const CanonicalQuery& quer
} else {
bool shouldMergeSort = false;
- if (!query.getParsed().getSort().isEmpty()) {
- const BSONObj& desiredSort = query.getParsed().getSort();
+ if (!query.getQueryRequest().getSort().isEmpty()) {
+ const BSONObj& desiredSort = query.getQueryRequest().getSort();
// If there exists a sort order that is present in each child, we can merge them and
// maintain that sort order / those sort orders.
@@ -1117,7 +1118,7 @@ QuerySolutionNode* QueryPlannerAccess::buildIndexedOr(const CanonicalQuery& quer
if (shouldMergeSort) {
MergeSortNode* msn = new MergeSortNode();
- msn->sort = query.getParsed().getSort();
+ msn->sort = query.getQueryRequest().getSort();
msn->children.swap(ixscanNodes);
orResult = msn;
} else {
@@ -1246,8 +1247,8 @@ QuerySolutionNode* QueryPlannerAccess::scanWholeIndex(const IndexEntry& index,
unique_ptr<IndexScanNode> isn = make_unique<IndexScanNode>();
isn->indexKeyPattern = index.keyPattern;
isn->indexIsMultiKey = index.multikey;
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
+ isn->maxScan = query.getQueryRequest().getMaxScan();
+ isn->addKeyMetadata = query.getQueryRequest().returnKey();
IndexBoundsBuilder::allValuesBounds(index.keyPattern, &isn->bounds);
@@ -1383,8 +1384,8 @@ QuerySolutionNode* QueryPlannerAccess::makeIndexScan(const IndexEntry& index,
isn->indexKeyPattern = index.keyPattern;
isn->indexIsMultiKey = index.multikey;
isn->direction = 1;
- isn->maxScan = query.getParsed().getMaxScan();
- isn->addKeyMetadata = query.getParsed().returnKey();
+ isn->maxScan = query.getQueryRequest().getMaxScan();
+ isn->addKeyMetadata = query.getQueryRequest().returnKey();
isn->bounds.isSimpleRange = true;
isn->bounds.startKey = startKey;
isn->bounds.endKey = endKey;
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index e7ab375b03c..0e1dcb60e28 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -350,7 +350,7 @@ bool QueryPlannerAnalysis::explodeForSort(const CanonicalQuery& query,
getLeafNodes(*solnRoot, &leafNodes);
- const BSONObj& desiredSort = query.getParsed().getSort();
+ const BSONObj& desiredSort = query.getQueryRequest().getSort();
// How many scan leaves will result from our expansion?
size_t totalNumScans = 0;
@@ -463,8 +463,8 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
bool* blockingSortOut) {
*blockingSortOut = false;
- const LiteParsedQuery& lpq = query.getParsed();
- const BSONObj& sortObj = lpq.getSort();
+ const QueryRequest& qr = query.getQueryRequest();
+ const BSONObj& sortObj = qr.getSort();
if (sortObj.isEmpty()) {
return solnRoot;
@@ -524,7 +524,7 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
// And build the full sort stage. The sort stage has to have a sort key generating stage
// as its child, supplying it with the appropriate sort keys.
SortKeyGeneratorNode* keyGenNode = new SortKeyGeneratorNode();
- keyGenNode->queryObj = lpq.getFilter();
+ keyGenNode->queryObj = qr.getFilter();
keyGenNode->sortSpec = sortObj;
keyGenNode->children.push_back(solnRoot);
solnRoot = keyGenNode;
@@ -536,19 +536,19 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
// When setting the limit on the sort, we need to consider both
// the limit N and skip count M. The sort should return an ordered list of
// N + M items so that the skip stage can discard the first M results.
- if (lpq.getLimit()) {
+ if (qr.getLimit()) {
// We have a true limit. The limit can be combined with the SORT stage.
sort->limit =
- static_cast<size_t>(*lpq.getLimit()) + static_cast<size_t>(lpq.getSkip().value_or(0));
- } else if (lpq.getNToReturn()) {
+ static_cast<size_t>(*qr.getLimit()) + static_cast<size_t>(qr.getSkip().value_or(0));
+ } else if (qr.getNToReturn()) {
// We have an ntoreturn specified by an OP_QUERY style find. This is used
// by clients to mean both batchSize and limit.
//
// Overflow here would be bad and could cause a nonsense limit. Cast
// skip and limit values to unsigned ints to make sure that the
// sum is never stored as signed. (See SERVER-13537).
- sort->limit = static_cast<size_t>(*lpq.getNToReturn()) +
- static_cast<size_t>(lpq.getSkip().value_or(0));
+ sort->limit =
+ static_cast<size_t>(*qr.getNToReturn()) + static_cast<size_t>(qr.getSkip().value_or(0));
// This is a SORT with a limit. The wire protocol has a single quantity
// called "numToReturn" which could mean either limit or batchSize.
@@ -573,7 +573,7 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
//
// We must also add an ENSURE_SORTED node above the OR to ensure that the final results are
// in correct sorted order, which may not be true if the data is concurrently modified.
- if (lpq.wantMore() && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
+ if (qr.wantMore() && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
@@ -660,7 +660,7 @@ QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& que
bool hasAndHashStage = hasNode(solnRoot, STAGE_AND_HASH);
soln->hasBlockingStage = hasSortStage || hasAndHashStage;
- const LiteParsedQuery& lpq = query.getParsed();
+ const QueryRequest& qr = query.getQueryRequest();
// If we can (and should), add the keep mutations stage.
@@ -685,7 +685,7 @@ QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& que
const bool cannotKeepFlagged = hasNode(solnRoot, STAGE_TEXT) ||
hasNode(solnRoot, STAGE_GEO_NEAR_2D) || hasNode(solnRoot, STAGE_GEO_NEAR_2DSPHERE) ||
- (!lpq.getSort().isEmpty() && !hasSortStage) || hasNotRootSort;
+ (!qr.getSort().isEmpty() && !hasSortStage) || hasNotRootSort;
// Only index intersection stages ever produce flagged results.
const bool couldProduceFlagged = hasAndHashStage || hasNode(solnRoot, STAGE_AND_SORTED);
@@ -802,8 +802,8 @@ QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& que
// generate the sort key computed data.
if (!hasSortStage && query.getProj()->wantSortKey()) {
SortKeyGeneratorNode* keyGenNode = new SortKeyGeneratorNode();
- keyGenNode->queryObj = lpq.getFilter();
- keyGenNode->sortSpec = lpq.getSort();
+ keyGenNode->queryObj = qr.getFilter();
+ keyGenNode->sortSpec = qr.getSort();
keyGenNode->children.push_back(solnRoot);
solnRoot = keyGenNode;
}
@@ -812,7 +812,7 @@ QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& que
ProjectionNode* projNode = new ProjectionNode(*query.getProj());
projNode->children.push_back(solnRoot);
projNode->fullExpression = query.root();
- projNode->projection = lpq.getProj();
+ projNode->projection = qr.getProj();
projNode->projType = projType;
projNode->coveredKeyObj = coveredKeyObj;
solnRoot = projNode;
@@ -825,9 +825,9 @@ QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& que
}
}
- if (lpq.getSkip()) {
+ if (qr.getSkip()) {
SkipNode* skip = new SkipNode();
- skip->skip = *lpq.getSkip();
+ skip->skip = *qr.getSkip();
skip->children.push_back(solnRoot);
solnRoot = skip;
}
@@ -839,16 +839,16 @@ QuerySolution* QueryPlannerAnalysis::analyzeDataAccess(const CanonicalQuery& que
if (!hasSortStage) {
// We don't have a sort stage. This means that, if there is a limit, we will have
// to enforce it ourselves since it's not handled inside SORT.
- if (lpq.getLimit()) {
+ if (qr.getLimit()) {
LimitNode* limit = new LimitNode();
- limit->limit = *lpq.getLimit();
+ limit->limit = *qr.getLimit();
limit->children.push_back(solnRoot);
solnRoot = limit;
- } else if (lpq.getNToReturn() && !lpq.wantMore()) {
+ } else if (qr.getNToReturn() && !qr.wantMore()) {
// We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
// find.
LimitNode* limit = new LimitNode();
- limit->limit = *lpq.getNToReturn();
+ limit->limit = *qr.getNToReturn();
limit->children.push_back(solnRoot);
solnRoot = limit;
}
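The limit/skip arithmetic in analyzeSort() above is worth a tiny worked illustration; the values below are made up.

// Illustration only: limit 5 plus skip 2 means SORT must retain 7 documents,
// so SKIP can discard the first 2 and a full 5 results still come back.
boost::optional<long long> limit = 5;
boost::optional<long long> skip = 2;
size_t sortLimit = static_cast<size_t>(*limit) + static_cast<size_t>(skip.value_or(0));  // == 7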
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index 49905949142..2812cddca31 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -232,7 +232,7 @@ QuerySolution* buildWholeIXSoln(const IndexEntry& index,
}
bool providesSort(const CanonicalQuery& query, const BSONObj& kp) {
- return query.getParsed().getSort().isPrefixOf(kp);
+ return query.getQueryRequest().getSort().isPrefixOf(kp);
}
// static
@@ -441,7 +441,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
}
const bool canTableScan = !(params.options & QueryPlannerParams::NO_TABLE_SCAN);
- const bool isTailable = query.getParsed().isTailable();
+ const bool isTailable = query.getQueryRequest().isTailable();
// If the query requests a tailable cursor, the only solution is a collscan + filter with
// tailable set on the collscan. TODO: This is a policy departure. Previously I think you
@@ -460,9 +460,10 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
// The hint or sort can be $natural: 1. If this happens, output a collscan. If both
// a $natural hint and a $natural sort are specified, then the direction of the collscan
// is determined by the sign of the sort (not the sign of the hint).
- if (!query.getParsed().getHint().isEmpty() || !query.getParsed().getSort().isEmpty()) {
- BSONObj hintObj = query.getParsed().getHint();
- BSONObj sortObj = query.getParsed().getSort();
+ if (!query.getQueryRequest().getHint().isEmpty() ||
+ !query.getQueryRequest().getSort().isEmpty()) {
+ BSONObj hintObj = query.getQueryRequest().getHint();
+ BSONObj sortObj = query.getQueryRequest().getSort();
BSONElement naturalHint = dps::extractElementAtPath(hintObj, "$natural");
BSONElement naturalSort = dps::extractElementAtPath(sortObj, "$natural");
@@ -471,8 +472,8 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
if (!naturalHint.eoo() || (!naturalSort.eoo() && hintObj.isEmpty())) {
LOG(5) << "Forcing a table scan due to hinted $natural\n";
// min/max are incompatible with $natural.
- if (canTableScan && query.getParsed().getMin().isEmpty() &&
- query.getParsed().getMax().isEmpty()) {
+ if (canTableScan && query.getQueryRequest().getMin().isEmpty() &&
+ query.getQueryRequest().getMax().isEmpty()) {
QuerySolution* soln = buildCollscanSoln(query, isTailable, params);
if (NULL != soln) {
out->push_back(soln);
@@ -499,7 +500,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
// requested in the query.
BSONObj hintIndex;
if (!params.indexFiltersApplied) {
- hintIndex = query.getParsed().getHint();
+ hintIndex = query.getQueryRequest().getHint();
}
// If snapshot is set, default to collscanning. If the query param SNAPSHOT_USE_ID is set,
@@ -508,7 +509,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
//
// Don't do this if the query is a geonear or text, as text search queries must be answered
// using full text indices and geoNear queries must be answered using geospatial indices.
- if (query.getParsed().isSnapshot() &&
+ if (query.getQueryRequest().isSnapshot() &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
const bool useIXScan = params.options & QueryPlannerParams::SNAPSHOT_USE_ID;
@@ -570,9 +571,10 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
// Deal with the .min() and .max() query options. If either exists, we can only use an index
// that matches the object inside.
- if (!query.getParsed().getMin().isEmpty() || !query.getParsed().getMax().isEmpty()) {
- BSONObj minObj = query.getParsed().getMin();
- BSONObj maxObj = query.getParsed().getMax();
+ if (!query.getQueryRequest().getMin().isEmpty() ||
+ !query.getQueryRequest().getMax().isEmpty()) {
+ BSONObj minObj = query.getQueryRequest().getMin();
+ BSONObj maxObj = query.getQueryRequest().getMax();
// The unfinished siblings of these objects may not be proper index keys because they
// may be empty objects or have field names. When an index is picked to use for the
@@ -672,7 +674,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
//
// TEXT and GEO_NEAR are special because they require the use of a text/geo index in order
// to be evaluated correctly. Stripping these "mandatory assignments" is therefore invalid.
- if (query.getParsed().getProj().isEmpty() &&
+ if (query.getQueryRequest().getProj().isEmpty() &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
QueryPlannerIXSelect::stripUnneededAssignments(query.root(), relevantIndices);
@@ -816,7 +818,7 @@ Status QueryPlanner::plan(const CanonicalQuery& query,
// If a sort order is requested, there may be an index that provides it, even if that
// index is not over any predicates in the query.
//
- if (!query.getParsed().getSort().isEmpty() &&
+ if (!query.getQueryRequest().getSort().isEmpty() &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT)) {
// See if we have a sort provided from an index already.
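As a concrete reading of providesSort() above: a requested sort that is a prefix of an index key pattern can be provided by a scan of that index. A small self-contained check of the same prefix test, with made-up key patterns:

// Illustration of the isPrefixOf() check used by providesSort() above.
BSONObj requestedSort = BSON("a" << 1);
BSONObj keyPattern = BSON("a" << 1 << "b" << 1);
bool provided = requestedSort.isPrefixOf(keyPattern);  // true: {a: 1} prefixes {a: 1, b: 1}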
diff --git a/src/mongo/db/query/query_planner_test.cpp b/src/mongo/db/query/query_planner_test.cpp
index de70d351035..a3e4477a568 100644
--- a/src/mongo/db/query/query_planner_test.cpp
+++ b/src/mongo/db/query/query_planner_test.cpp
@@ -1479,7 +1479,7 @@ TEST_F(QueryPlannerTest, CompoundIndexWithEqualityPredicatesProvidesSort) {
//
TEST_F(QueryPlannerTest, SortLimit) {
- // Negative limit indicates hard limit - see lite_parsed_query.cpp
+ // Negative limit indicates hard limit - see query_request.cpp
runQuerySortProjSkipNToReturn(BSONObj(), fromjson("{a: 1}"), BSONObj(), 0, -3);
assertNumSolutions(1U);
assertSolutionExists(
@@ -4148,10 +4148,10 @@ TEST_F(QueryPlannerTest, CacheDataFromTaggedTreeFailsOnBadInput) {
// No relevant index matching the index tag.
relevantIndices.push_back(IndexEntry(BSON("a" << 1)));
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString("test.collection"));
- lpq->setFilter(BSON("a" << 3));
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString("test.collection"));
+ qr->setFilter(BSON("a" << 3));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
scopedCq->root()->setTag(new IndexTag(1));
@@ -4164,10 +4164,10 @@ TEST_F(QueryPlannerTest, CacheDataFromTaggedTreeFailsOnBadInput) {
TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
const NamespaceString nss("test.collection");
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 3));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 3));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> scopedCq = std::move(statusWithCQ.getValue());
@@ -4194,10 +4194,10 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) {
ASSERT_OK(s);
// Regenerate canonical query in order to clear tags.
- auto newLPQ = stdx::make_unique<LiteParsedQuery>(nss);
- newLPQ->setFilter(BSON("a" << 3));
+ auto newQR = stdx::make_unique<QueryRequest>(nss);
+ newQR->setFilter(BSON("a" << 3));
statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(newLPQ), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(newQR), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
scopedCq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index c49df6ab106..46762b3eb4a 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -219,27 +219,27 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
solns.clear();
cq.reset();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
- lpq->setSort(sort);
- lpq->setProj(proj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
+ qr->setSort(sort);
+ qr->setProj(proj);
if (skip) {
- lpq->setSkip(skip);
+ qr->setSkip(skip);
}
if (ntoreturn) {
if (ntoreturn < 0) {
ASSERT_NE(ntoreturn, std::numeric_limits<long long>::min());
ntoreturn = -ntoreturn;
- lpq->setWantMore(false);
+ qr->setWantMore(false);
}
- lpq->setNToReturn(ntoreturn);
+ qr->setNToReturn(ntoreturn);
}
- lpq->setHint(hint);
- lpq->setMin(minObj);
- lpq->setMax(maxObj);
- lpq->setSnapshot(snapshot);
+ qr->setHint(hint);
+ qr->setMin(minObj);
+ qr->setMax(maxObj);
+ qr->setSnapshot(snapshot);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(lpq), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -296,27 +296,27 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
solns.clear();
cq.reset();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
- lpq->setSort(sort);
- lpq->setProj(proj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
+ qr->setSort(sort);
+ qr->setProj(proj);
if (skip) {
- lpq->setSkip(skip);
+ qr->setSkip(skip);
}
if (ntoreturn) {
if (ntoreturn < 0) {
ASSERT_NE(ntoreturn, std::numeric_limits<long long>::min());
ntoreturn = -ntoreturn;
- lpq->setWantMore(false);
+ qr->setWantMore(false);
}
- lpq->setNToReturn(ntoreturn);
+ qr->setNToReturn(ntoreturn);
}
- lpq->setHint(hint);
- lpq->setMin(minObj);
- lpq->setMax(maxObj);
- lpq->setSnapshot(snapshot);
+ qr->setHint(hint);
+ qr->setMin(minObj);
+ qr->setMax(maxObj);
+ qr->setSnapshot(snapshot);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(lpq), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
@@ -331,11 +331,11 @@ void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
invariant(nss.isValid());
const bool isExplain = false;
- std::unique_ptr<LiteParsedQuery> lpq(
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain)));
+ std::unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn(), std::move(lpq), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
cq = std::move(statusWithCQ.getValue());
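A small illustrative call from within a fixture test into the path touched above: a negative ntoreturn is folded into a positive NToReturn with wantMore(false), matching the OP_QUERY hard-limit convention.

// Illustration: equivalent to an OP_QUERY find with ntoreturn = -3 (hard limit of 3).
runQuerySortProjSkipNToReturn(fromjson("{a: 1}"), BSONObj(), BSONObj(), 0, -3);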
diff --git a/src/mongo/db/query/lite_parsed_query.cpp b/src/mongo/db/query/query_request.cpp
index 4e7e036a4cc..f42d97d326d 100644
--- a/src/mongo/db/query/lite_parsed_query.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -28,7 +28,7 @@
#include "mongo/platform/basic.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
@@ -44,20 +44,20 @@ namespace mongo {
using std::string;
using std::unique_ptr;
-const std::string LiteParsedQuery::kUnwrappedReadPrefField("$queryOptions");
-const std::string LiteParsedQuery::kWrappedReadPrefField("$readPreference");
+const std::string QueryRequest::kUnwrappedReadPrefField("$queryOptions");
+const std::string QueryRequest::kWrappedReadPrefField("$readPreference");
-const char LiteParsedQuery::cmdOptionMaxTimeMS[] = "maxTimeMS";
-const char LiteParsedQuery::queryOptionMaxTimeMS[] = "$maxTimeMS";
+const char QueryRequest::cmdOptionMaxTimeMS[] = "maxTimeMS";
+const char QueryRequest::queryOptionMaxTimeMS[] = "$maxTimeMS";
-const string LiteParsedQuery::metaGeoNearDistance("geoNearDistance");
-const string LiteParsedQuery::metaGeoNearPoint("geoNearPoint");
-const string LiteParsedQuery::metaIndexKey("indexKey");
-const string LiteParsedQuery::metaRecordId("recordId");
-const string LiteParsedQuery::metaSortKey("sortKey");
-const string LiteParsedQuery::metaTextScore("textScore");
+const string QueryRequest::metaGeoNearDistance("geoNearDistance");
+const string QueryRequest::metaGeoNearPoint("geoNearPoint");
+const string QueryRequest::metaIndexKey("indexKey");
+const string QueryRequest::metaRecordId("recordId");
+const string QueryRequest::metaSortKey("sortKey");
+const string QueryRequest::metaTextScore("textScore");
-const long long LiteParsedQuery::kDefaultBatchSize = 101;
+const long long QueryRequest::kDefaultBatchSize = 101;
namespace {
@@ -100,17 +100,17 @@ const char kOptionsField[] = "options";
} // namespace
-const char LiteParsedQuery::kFindCommandName[] = "find";
-const char LiteParsedQuery::kShardVersionField[] = "shardVersion";
+const char QueryRequest::kFindCommandName[] = "find";
+const char QueryRequest::kShardVersionField[] = "shardVersion";
-LiteParsedQuery::LiteParsedQuery(NamespaceString nss) : _nss(std::move(nss)) {}
+QueryRequest::QueryRequest(NamespaceString nss) : _nss(std::move(nss)) {}
// static
-StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(NamespaceString nss,
- const BSONObj& cmdObj,
- bool isExplain) {
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery(std::move(nss)));
- pq->_explain = isExplain;
+StatusWith<unique_ptr<QueryRequest>> QueryRequest::makeFromFindCommand(NamespaceString nss,
+ const BSONObj& cmdObj,
+ bool isExplain) {
+ unique_ptr<QueryRequest> qr(new QueryRequest(std::move(nss)));
+ qr->_explain = isExplain;
// Parse the command BSON by looping through one element at a time.
BSONObjIterator it(cmdObj);
@@ -128,21 +128,21 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return status;
}
- pq->_filter = el.Obj().getOwned();
+ qr->_filter = el.Obj().getOwned();
} else if (str::equals(fieldName, kProjectionField)) {
Status status = checkFieldType(el, Object);
if (!status.isOK()) {
return status;
}
- pq->_proj = el.Obj().getOwned();
+ qr->_proj = el.Obj().getOwned();
} else if (str::equals(fieldName, kSortField)) {
Status status = checkFieldType(el, Object);
if (!status.isOK()) {
return status;
}
- pq->_sort = el.Obj().getOwned();
+ qr->_sort = el.Obj().getOwned();
} else if (str::equals(fieldName, kHintField)) {
BSONObj hintObj;
if (Object == el.type()) {
@@ -154,7 +154,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
"hint must be either a string or nested object");
}
- pq->_hint = hintObj;
+ qr->_hint = hintObj;
} else if (str::equals(fieldName, repl::ReadConcernArgs::kReadConcernFieldName.c_str())) {
// Read concern parsing is handled elsewhere, but we store a copy here.
Status status = checkFieldType(el, Object);
@@ -162,7 +162,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return status;
}
- pq->_readConcern = el.Obj().getOwned();
+ qr->_readConcern = el.Obj().getOwned();
} else if (str::equals(fieldName, kCollationField)) {
// Collation parsing is handled elsewhere, but we store a copy here.
Status status = checkFieldType(el, Object);
@@ -170,7 +170,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return status;
}
- pq->_collation = el.Obj().getOwned();
+ qr->_collation = el.Obj().getOwned();
} else if (str::equals(fieldName, kSkipField)) {
if (!el.isNumber()) {
str::stream ss;
@@ -183,7 +183,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
// A skip value of 0 means that there is no skip.
if (skip) {
- pq->_skip = skip;
+ qr->_skip = skip;
}
} else if (str::equals(fieldName, kLimitField)) {
if (!el.isNumber()) {
@@ -197,7 +197,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
// A limit value of 0 means that there is no limit.
if (limit) {
- pq->_limit = limit;
+ qr->_limit = limit;
}
} else if (str::equals(fieldName, kBatchSizeField)) {
if (!el.isNumber()) {
@@ -207,7 +207,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return Status(ErrorCodes::FailedToParse, ss);
}
- pq->_batchSize = el.numberLong();
+ qr->_batchSize = el.numberLong();
} else if (str::equals(fieldName, kNToReturnField)) {
if (!el.isNumber()) {
str::stream ss;
@@ -216,21 +216,21 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return Status(ErrorCodes::FailedToParse, ss);
}
- pq->_ntoreturn = el.numberLong();
+ qr->_ntoreturn = el.numberLong();
} else if (str::equals(fieldName, kSingleBatchField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_wantMore = !el.boolean();
+ qr->_wantMore = !el.boolean();
} else if (str::equals(fieldName, kCommentField)) {
Status status = checkFieldType(el, String);
if (!status.isOK()) {
return status;
}
- pq->_comment = el.str();
+ qr->_comment = el.str();
} else if (str::equals(fieldName, kMaxScanField)) {
if (!el.isNumber()) {
str::stream ss;
@@ -239,91 +239,91 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
return Status(ErrorCodes::FailedToParse, ss);
}
- pq->_maxScan = el.numberInt();
+ qr->_maxScan = el.numberInt();
} else if (str::equals(fieldName, cmdOptionMaxTimeMS)) {
StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
if (!maxTimeMS.isOK()) {
return maxTimeMS.getStatus();
}
- pq->_maxTimeMS = maxTimeMS.getValue();
+ qr->_maxTimeMS = maxTimeMS.getValue();
} else if (str::equals(fieldName, kMinField)) {
Status status = checkFieldType(el, Object);
if (!status.isOK()) {
return status;
}
- pq->_min = el.Obj().getOwned();
+ qr->_min = el.Obj().getOwned();
} else if (str::equals(fieldName, kMaxField)) {
Status status = checkFieldType(el, Object);
if (!status.isOK()) {
return status;
}
- pq->_max = el.Obj().getOwned();
+ qr->_max = el.Obj().getOwned();
} else if (str::equals(fieldName, kReturnKeyField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_returnKey = el.boolean();
+ qr->_returnKey = el.boolean();
} else if (str::equals(fieldName, kShowRecordIdField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_showRecordId = el.boolean();
+ qr->_showRecordId = el.boolean();
} else if (str::equals(fieldName, kSnapshotField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_snapshot = el.boolean();
+ qr->_snapshot = el.boolean();
} else if (str::equals(fieldName, kTailableField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_tailable = el.boolean();
+ qr->_tailable = el.boolean();
} else if (str::equals(fieldName, kOplogReplayField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_oplogReplay = el.boolean();
+ qr->_oplogReplay = el.boolean();
} else if (str::equals(fieldName, kNoCursorTimeoutField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_noCursorTimeout = el.boolean();
+ qr->_noCursorTimeout = el.boolean();
} else if (str::equals(fieldName, kAwaitDataField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_awaitData = el.boolean();
+ qr->_awaitData = el.boolean();
} else if (str::equals(fieldName, kPartialResultsField)) {
Status status = checkFieldType(el, Bool);
if (!status.isOK()) {
return status;
}
- pq->_allowPartialResults = el.boolean();
+ qr->_allowPartialResults = el.boolean();
} else if (str::equals(fieldName, kOptionsField)) {
// 3.0.x versions of the shell may generate an explain of a find command with an
// 'options' field. We accept this only if the 'options' field is empty so that
// the shell's explain implementation is forwards compatible.
//
// TODO: Remove for 3.4.
- if (!pq->isExplain()) {
+ if (!qr->isExplain()) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Field '" << kOptionsField
<< "' is only allowed for explain.");
@@ -347,7 +347,7 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
if (!status.isOK()) {
return status;
}
- pq->_replicationTerm = el._numberLong();
+ qr->_replicationTerm = el._numberLong();
} else if (!str::startsWith(fieldName, '$')) {
return Status(ErrorCodes::FailedToParse,
str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
@@ -357,23 +357,23 @@ StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::makeFromFindCommand(Nam
}
}
- pq->addMetaProjection();
+ qr->addMetaProjection();
- Status validateStatus = pq->validate();
+ Status validateStatus = qr->validate();
if (!validateStatus.isOK()) {
return validateStatus;
}
- return std::move(pq);
+ return std::move(qr);
}
-BSONObj LiteParsedQuery::asFindCommand() const {
+BSONObj QueryRequest::asFindCommand() const {
BSONObjBuilder bob;
asFindCommand(&bob);
return bob.obj();
}
-void LiteParsedQuery::asFindCommand(BSONObjBuilder* cmdBuilder) const {
+void QueryRequest::asFindCommand(BSONObjBuilder* cmdBuilder) const {
cmdBuilder->append(kFindCommandName, _nss.coll());
if (!_filter.isEmpty()) {
@@ -477,25 +477,25 @@ void LiteParsedQuery::asFindCommand(BSONObjBuilder* cmdBuilder) const {
}
}
-void LiteParsedQuery::addReturnKeyMetaProj() {
+void QueryRequest::addReturnKeyMetaProj() {
BSONObjBuilder projBob;
projBob.appendElements(_proj);
// We use $$ because it's never going to show up in a user's projection.
// The exact text doesn't matter.
- BSONObj indexKey = BSON("$$" << BSON("$meta" << LiteParsedQuery::metaIndexKey));
+ BSONObj indexKey = BSON("$$" << BSON("$meta" << QueryRequest::metaIndexKey));
projBob.append(indexKey.firstElement());
_proj = projBob.obj();
}
-void LiteParsedQuery::addShowRecordIdMetaProj() {
+void QueryRequest::addShowRecordIdMetaProj() {
BSONObjBuilder projBob;
projBob.appendElements(_proj);
- BSONObj metaRecordId = BSON("$recordId" << BSON("$meta" << LiteParsedQuery::metaRecordId));
+ BSONObj metaRecordId = BSON("$recordId" << BSON("$meta" << QueryRequest::metaRecordId));
projBob.append(metaRecordId.firstElement());
_proj = projBob.obj();
}
-Status LiteParsedQuery::validate() const {
+Status QueryRequest::validate() const {
// Min and Max objects must have the same fields.
if (!_min.isEmpty() && !_max.isEmpty()) {
if (!_min.isFieldNamePrefixOf(_max) || (_min.nFields() != _max.nFields())) {
@@ -606,7 +606,7 @@ Status LiteParsedQuery::validate() const {
}
// static
-StatusWith<int> LiteParsedQuery::parseMaxTimeMS(BSONElement maxTimeMSElt) {
+StatusWith<int> QueryRequest::parseMaxTimeMS(BSONElement maxTimeMSElt) {
if (!maxTimeMSElt.eoo() && !maxTimeMSElt.isNumber()) {
return StatusWith<int>(
ErrorCodes::BadValue,
@@ -629,7 +629,7 @@ StatusWith<int> LiteParsedQuery::parseMaxTimeMS(BSONElement maxTimeMSElt) {
}
// static
-bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
+bool QueryRequest::isTextScoreMeta(BSONElement elt) {
// elt must be foo: {$meta: "textScore"}
if (mongo::Object != elt.type()) {
return false;
@@ -647,7 +647,7 @@ bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
if (mongo::String != metaElt.type()) {
return false;
}
- if (LiteParsedQuery::metaTextScore != metaElt.valuestr()) {
+ if (QueryRequest::metaTextScore != metaElt.valuestr()) {
return false;
}
// must have exactly 1 element
@@ -658,7 +658,7 @@ bool LiteParsedQuery::isTextScoreMeta(BSONElement elt) {
}
// static
-bool LiteParsedQuery::isValidSortOrder(const BSONObj& sortObj) {
+bool QueryRequest::isValidSortOrder(const BSONObj& sortObj) {
BSONObjIterator i(sortObj);
while (i.more()) {
BSONElement e = i.next();
@@ -679,7 +679,7 @@ bool LiteParsedQuery::isValidSortOrder(const BSONObj& sortObj) {
}
// static
-bool LiteParsedQuery::isQueryIsolated(const BSONObj& query) {
+bool QueryRequest::isQueryIsolated(const BSONObj& query) {
BSONObjIterator iter(query);
while (iter.more()) {
BSONElement elt = iter.next();
@@ -692,28 +692,27 @@ bool LiteParsedQuery::isQueryIsolated(const BSONObj& query) {
}
//
-// Old LiteParsedQuery parsing code: SOON TO BE DEPRECATED.
+// Old QueryRequest parsing code: SOON TO BE DEPRECATED.
//
// static
-StatusWith<unique_ptr<LiteParsedQuery>> LiteParsedQuery::fromLegacyQueryMessage(
- const QueryMessage& qm) {
- unique_ptr<LiteParsedQuery> pq(new LiteParsedQuery(NamespaceString(qm.ns)));
+StatusWith<unique_ptr<QueryRequest>> QueryRequest::fromLegacyQueryMessage(const QueryMessage& qm) {
+ unique_ptr<QueryRequest> qr(new QueryRequest(NamespaceString(qm.ns)));
- Status status = pq->init(qm.ntoskip, qm.ntoreturn, qm.queryOptions, qm.query, qm.fields, true);
+ Status status = qr->init(qm.ntoskip, qm.ntoreturn, qm.queryOptions, qm.query, qm.fields, true);
if (!status.isOK()) {
return status;
}
- return std::move(pq);
+ return std::move(qr);
}
-Status LiteParsedQuery::init(int ntoskip,
- int ntoreturn,
- int queryOptions,
- const BSONObj& queryObj,
- const BSONObj& proj,
- bool fromQueryMessage) {
+Status QueryRequest::init(int ntoskip,
+ int ntoreturn,
+ int queryOptions,
+ const BSONObj& queryObj,
+ const BSONObj& proj,
+ bool fromQueryMessage) {
_proj = proj.getOwned();
if (ntoskip) {
@@ -760,7 +759,7 @@ Status LiteParsedQuery::init(int ntoskip,
return validate();
}
-Status LiteParsedQuery::initFullQuery(const BSONObj& top) {
+Status QueryRequest::initFullQuery(const BSONObj& top) {
BSONObjIterator i(top);
while (i.more()) {
@@ -860,7 +859,7 @@ Status LiteParsedQuery::initFullQuery(const BSONObj& top) {
return Status::OK();
}
-int LiteParsedQuery::getOptions() const {
+int QueryRequest::getOptions() const {
int options = 0;
if (_tailable) {
options |= QueryOption_CursorTailable;
@@ -886,7 +885,7 @@ int LiteParsedQuery::getOptions() const {
return options;
}
-void LiteParsedQuery::initFromInt(int options) {
+void QueryRequest::initFromInt(int options) {
_tailable = (options & QueryOption_CursorTailable) != 0;
_slaveOk = (options & QueryOption_SlaveOk) != 0;
_oplogReplay = (options & QueryOption_OplogReplay) != 0;
@@ -896,7 +895,7 @@ void LiteParsedQuery::initFromInt(int options) {
_allowPartialResults = (options & QueryOption_PartialResults) != 0;
}
-void LiteParsedQuery::addMetaProjection() {
+void QueryRequest::addMetaProjection() {
// We might need to update the projection object with a $meta projection.
if (returnKey()) {
addReturnKeyMetaProj();
@@ -907,7 +906,7 @@ void LiteParsedQuery::addMetaProjection() {
}
}
-boost::optional<long long> LiteParsedQuery::getEffectiveBatchSize() const {
+boost::optional<long long> QueryRequest::getEffectiveBatchSize() const {
return _batchSize ? _batchSize : _ntoreturn;
}
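
A quick illustration of the renamed getEffectiveBatchSize() above (a minimal sketch, not part of the patch, using only setters that appear elsewhere in this diff): batchSize, when set, wins; otherwise the legacy ntoreturn value is used.

    QueryRequest qr(NamespaceString("test.coll"));  // hypothetical namespace, for illustration only
    qr.setNToReturn(5);  // legacy OP_QUERY path; no batchSize set
    auto effective = qr.getEffectiveBatchSize();
    invariant(effective && *effective == 5);  // falls back to ntoreturn when _batchSize is unset
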
diff --git a/src/mongo/db/query/lite_parsed_query.h b/src/mongo/db/query/query_request.h
index cf906b5b8d2..cd7fa62f393 100644
--- a/src/mongo/db/query/lite_parsed_query.h
+++ b/src/mongo/db/query/query_request.h
@@ -45,15 +45,15 @@ class StatusWith;
* Parses the QueryMessage or find command received from the user and makes the various fields
* more easily accessible.
*/
-class LiteParsedQuery {
+class QueryRequest {
public:
static const char kFindCommandName[];
static const char kShardVersionField[];
- LiteParsedQuery(NamespaceString nss);
+ QueryRequest(NamespaceString nss);
/**
- * Returns a non-OK status if any property of the LPQ has a bad value (e.g. a negative skip
+ * Returns a non-OK status if any property of the QR has a bad value (e.g. a negative skip
* value) or if there is a bad combination of options (e.g. awaitData is illegal without
* tailable).
*/
@@ -63,15 +63,15 @@ public:
* Parses a find command object, 'cmdObj'. Caller must indicate whether or not this lite
* parsed query is an explained query via 'isExplain'.
*
- * Returns a heap allocated LiteParsedQuery on success or an error if 'cmdObj' is not well
+ * Returns a heap allocated QueryRequest on success or an error if 'cmdObj' is not well
* formed.
*/
- static StatusWith<std::unique_ptr<LiteParsedQuery>> makeFromFindCommand(NamespaceString nss,
- const BSONObj& cmdObj,
- bool isExplain);
+ static StatusWith<std::unique_ptr<QueryRequest>> makeFromFindCommand(NamespaceString nss,
+ const BSONObj& cmdObj,
+ bool isExplain);
/**
- * Converts this LPQ into a find command.
+ * Converts this QR into a find command.
*/
BSONObj asFindCommand() const;
void asFindCommand(BSONObjBuilder* cmdBuilder) const;
@@ -382,11 +382,10 @@ public:
//
/**
- * Parse the provided QueryMessage and return a heap constructed LiteParsedQuery, which
+ * Parse the provided QueryMessage and return a heap constructed QueryRequest, which
* represents it or an error.
*/
- static StatusWith<std::unique_ptr<LiteParsedQuery>> fromLegacyQueryMessage(
- const QueryMessage& qm);
+ static StatusWith<std::unique_ptr<QueryRequest>> fromLegacyQueryMessage(const QueryMessage& qm);
private:
Status init(int ntoskip,
@@ -449,7 +448,7 @@ private:
boost::optional<long long> _batchSize;
// Set only when parsed from an OP_QUERY find message. The value is computed by driver or shell
- // and is set to be a min of batchSize and limit provided by user. LPQ can have set either
+ // and is set to be a min of batchSize and limit provided by user. QR can have set either
// ntoreturn or batchSize / limit.
boost::optional<long long> _ntoreturn;
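
The header keeps the same two entry points under the new name. A minimal caller sketch, illustrative only (error handling elided; the namespace and command object below are placeholders):

    const NamespaceString nss("test.coll");
    const BSONObj cmdObj = fromjson("{find: 'coll', filter: {a: 1}, limit: 2}");  // placeholder command
    auto swQR = QueryRequest::makeFromFindCommand(nss, cmdObj, /*isExplain*/ false);
    if (swQR.isOK()) {
        // Round-trip the parsed request back into a find command object.
        BSONObj rebuilt = swQR.getValue()->asFindCommand();
    }
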
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
new file mode 100644
index 00000000000..384c723a5fd
--- /dev/null
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -0,0 +1,1097 @@
+/**
+ * Copyright (C) 2013 MongoDB Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the GNU Affero General Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/platform/basic.h"
+
+#include <boost/optional.hpp>
+#include <boost/optional/optional_io.hpp>
+
+#include "mongo/db/json.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/query_request.h"
+#include "mongo/unittest/unittest.h"
+
+namespace mongo {
+namespace {
+
+using std::unique_ptr;
+using unittest::assertGet;
+
+static const NamespaceString testns("testdb.testcoll");
+
+TEST(QueryRequestTest, LimitWithNToReturn) {
+ QueryRequest qr(testns);
+ qr.setLimit(0);
+ qr.setNToReturn(0);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, BatchSizeWithNToReturn) {
+ QueryRequest qr(testns);
+ qr.setBatchSize(0);
+ qr.setNToReturn(0);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, NegativeSkip) {
+ QueryRequest qr(testns);
+ qr.setSkip(-1);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ZeroSkip) {
+ QueryRequest qr(testns);
+ qr.setSkip(0);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, PositiveSkip) {
+ QueryRequest qr(testns);
+ qr.setSkip(1);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, NegativeLimit) {
+ QueryRequest qr(testns);
+ qr.setLimit(-1);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ZeroLimit) {
+ QueryRequest qr(testns);
+ qr.setLimit(0);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, PositiveLimit) {
+ QueryRequest qr(testns);
+ qr.setLimit(1);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, NegativeBatchSize) {
+ QueryRequest qr(testns);
+ qr.setBatchSize(-1);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ZeroBatchSize) {
+ QueryRequest qr(testns);
+ qr.setBatchSize(0);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, PositiveBatchSize) {
+ QueryRequest qr(testns);
+ qr.setBatchSize(1);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, NegativeNToReturn) {
+ QueryRequest qr(testns);
+ qr.setNToReturn(-1);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ZeroNToReturn) {
+ QueryRequest qr(testns);
+ qr.setNToReturn(0);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, PositiveNToReturn) {
+ QueryRequest qr(testns);
+ qr.setNToReturn(1);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, NegativeMaxScan) {
+ QueryRequest qr(testns);
+ qr.setMaxScan(-1);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ZeroMaxScan) {
+ QueryRequest qr(testns);
+ qr.setMaxScan(0);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, PositiveMaxScan) {
+ QueryRequest qr(testns);
+ qr.setMaxScan(1);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, NegativeMaxTimeMS) {
+ QueryRequest qr(testns);
+ qr.setMaxTimeMS(-1);
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ZeroMaxTimeMS) {
+ QueryRequest qr(testns);
+ qr.setMaxTimeMS(0);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, PositiveMaxTimeMS) {
+ QueryRequest qr(testns);
+ qr.setMaxTimeMS(1);
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ValidSortOrder) {
+ QueryRequest qr(testns);
+ qr.setSort(fromjson("{a: 1}"));
+ ASSERT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, InvalidSortOrderString) {
+ QueryRequest qr(testns);
+ qr.setSort(fromjson("{a: \"\"}"));
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, MinFieldsNotPrefixOfMax) {
+ QueryRequest qr(testns);
+ qr.setMin(fromjson("{a: 1}"));
+ qr.setMax(fromjson("{b: 1}"));
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, MinFieldsMoreThanMax) {
+ QueryRequest qr(testns);
+ qr.setMin(fromjson("{a: 1, b: 1}"));
+ qr.setMax(fromjson("{a: 1}"));
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, MinFieldsLessThanMax) {
+ QueryRequest qr(testns);
+ qr.setMin(fromjson("{a: 1}"));
+ qr.setMax(fromjson("{a: 1, b: 1}"));
+ ASSERT_NOT_OK(qr.validate());
+}
+
+TEST(QueryRequestTest, ForbidTailableWithNonNaturalSort) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "tailable: true,"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ForbidTailableWithSingleBatch) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "tailable: true,"
+ "singleBatch: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, AllowTailableWithNaturalSort) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "tailable: true,"
+ "sort: {$natural: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_OK(result.getStatus());
+ ASSERT_TRUE(result.getValue()->isTailable());
+ ASSERT_EQ(result.getValue()->getSort(), BSON("$natural" << 1));
+}
+
+TEST(QueryRequestTest, IsIsolatedReturnsTrueWithIsolated) {
+ ASSERT_TRUE(QueryRequest::isQueryIsolated(BSON("$isolated" << 1)));
+}
+
+TEST(QueryRequestTest, IsIsolatedReturnsTrueWithAtomic) {
+ ASSERT_TRUE(QueryRequest::isQueryIsolated(BSON("$atomic" << 1)));
+}
+
+TEST(QueryRequestTest, IsIsolatedReturnsFalseWithIsolated) {
+ ASSERT_FALSE(QueryRequest::isQueryIsolated(BSON("$isolated" << false)));
+}
+
+TEST(QueryRequestTest, IsIsolatedReturnsFalseWithAtomic) {
+ ASSERT_FALSE(QueryRequest::isQueryIsolated(BSON("$atomic" << false)));
+}
+
+//
+// Test compatibility of various projection and sort objects.
+//
+
+TEST(QueryRequestTest, ValidSortProj) {
+ QueryRequest qr(testns);
+ qr.setProj(fromjson("{a: 1}"));
+ qr.setSort(fromjson("{a: 1}"));
+ ASSERT_OK(qr.validate());
+
+ QueryRequest metaQR(testns);
+ metaQR.setProj(fromjson("{a: {$meta: \"textScore\"}}"));
+ metaQR.setSort(fromjson("{a: {$meta: \"textScore\"}}"));
+ ASSERT_OK(metaQR.validate());
+}
+
+TEST(QueryRequestTest, ForbidNonMetaSortOnFieldWithMetaProject) {
+ QueryRequest badQR(testns);
+ badQR.setProj(fromjson("{a: {$meta: \"textScore\"}}"));
+ badQR.setSort(fromjson("{a: 1}"));
+ ASSERT_NOT_OK(badQR.validate());
+
+ QueryRequest goodQR(testns);
+ goodQR.setProj(fromjson("{a: {$meta: \"textScore\"}}"));
+ goodQR.setSort(fromjson("{b: 1}"));
+ ASSERT_OK(goodQR.validate());
+}
+
+TEST(QueryRequestTest, ForbidMetaSortOnFieldWithoutMetaProject) {
+ QueryRequest qrMatching(testns);
+ qrMatching.setProj(fromjson("{a: 1}"));
+ qrMatching.setSort(fromjson("{a: {$meta: \"textScore\"}}"));
+ ASSERT_NOT_OK(qrMatching.validate());
+
+ QueryRequest qrNonMatching(testns);
+ qrNonMatching.setProj(fromjson("{b: 1}"));
+ qrNonMatching.setSort(fromjson("{a: {$meta: \"textScore\"}}"));
+ ASSERT_NOT_OK(qrNonMatching.validate());
+}
+
+//
+// Text meta BSON element validation
+//
+
+bool isFirstElementTextScoreMeta(const char* sortStr) {
+ BSONObj sortObj = fromjson(sortStr);
+ BSONElement elt = sortObj.firstElement();
+ bool result = QueryRequest::isTextScoreMeta(elt);
+ return result;
+}
+
+// Check validation of $meta expressions
+TEST(QueryRequestTest, IsTextScoreMeta) {
+ // Valid textScore meta sort
+ ASSERT(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\"}}"));
+
+ // Invalid textScore meta sorts
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: 1}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"image\"}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$world: \"textScore\"}}"));
+ ASSERT_FALSE(isFirstElementTextScoreMeta("{a: {$meta: \"textScore\", b: 1}}"));
+}
+
+//
+// Sort order validation
+// In a valid sort order, each element satisfies one of:
+// 1. a number with value 1
+// 2. a number with value -1
+// 3. isTextScoreMeta
+//
+
+TEST(QueryRequestTest, ValidateSortOrder) {
+ // Valid sorts
+ ASSERT(QueryRequest::isValidSortOrder(fromjson("{}")));
+ ASSERT(QueryRequest::isValidSortOrder(fromjson("{a: 1}")));
+ ASSERT(QueryRequest::isValidSortOrder(fromjson("{a: -1}")));
+ ASSERT(QueryRequest::isValidSortOrder(fromjson("{a: {$meta: \"textScore\"}}")));
+
+ // Invalid sorts
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: 100}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: 0}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: -100}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: Infinity}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: -Infinity}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: true}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: false}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: null}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: {}}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: {b: 1}}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: []}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: [1, 2, 3]}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: \"\"}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: \"bb\"}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: {$meta: 1}}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: {$meta: \"image\"}}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{a: {$world: \"textScore\"}}")));
+ ASSERT_FALSE(
+ QueryRequest::isValidSortOrder(fromjson("{a: {$meta: \"textScore\","
+ " b: 1}}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{'': 1}")));
+ ASSERT_FALSE(QueryRequest::isValidSortOrder(fromjson("{'': -1}")));
+}
+
+//
+// Tests for parsing a query request from a command BSON object.
+//
+
+TEST(QueryRequestTest, ParseFromCommandBasic) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 3},"
+ "sort: {a: 1},"
+ "projection: {_id: 0, a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandWithOptions) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 3},"
+ "sort: {a: 1},"
+ "projection: {_id: 0, a: 1},"
+ "showRecordId: true,"
+ "maxScan: 1000}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Make sure the values from the command BSON are reflected in the QR.
+ ASSERT(qr->showRecordId());
+ ASSERT_EQUALS(1000, qr->getMaxScan());
+}
+
+TEST(QueryRequestTest, ParseFromCommandHintAsString) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "hint: 'foo_1'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ BSONObj hintObj = qr->getHint();
+ ASSERT_EQUALS(BSON("$hint"
+ << "foo_1"),
+ hintObj);
+}
+
+TEST(QueryRequestTest, ParseFromCommandValidSortProj) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: 1},"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ ASSERT_OK(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandValidSortProjMeta) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ ASSERT_OK(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandAllFlagsTrue) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "tailable: true,"
+ "oplogReplay: true,"
+ "noCursorTimeout: true,"
+ "awaitData: true,"
+ "allowPartialResults: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Test that all the flags got set to true.
+ ASSERT(qr->isTailable());
+ ASSERT(!qr->isSlaveOk());
+ ASSERT(qr->isOplogReplay());
+ ASSERT(qr->isNoCursorTimeout());
+ ASSERT(qr->isAwaitData());
+ ASSERT(qr->isAllowPartialResults());
+}
+
+TEST(QueryRequestTest, ParseFromCommandCommentWithValidMinMax) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "comment: 'the comment',"
+ "min: {a: 1},"
+ "max: {a: 2}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS("the comment", qr->getComment());
+ BSONObj expectedMin = BSON("a" << 1);
+ ASSERT_EQUALS(0, expectedMin.woCompare(qr->getMin()));
+ BSONObj expectedMax = BSON("a" << 2);
+ ASSERT_EQUALS(0, expectedMax.woCompare(qr->getMax()));
+}
+
+TEST(QueryRequestTest, ParseFromCommandAllNonOptionFields) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "sort: {b: 1},"
+ "projection: {c: 1},"
+ "hint: {d: 1},"
+ "readConcern: {e: 1},"
+ "collation: {f: 1},"
+ "limit: 3,"
+ "skip: 5,"
+ "batchSize: 90,"
+ "singleBatch: false}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // Check the values inside the QR.
+ BSONObj expectedQuery = BSON("a" << 1);
+ ASSERT_EQUALS(0, expectedQuery.woCompare(qr->getFilter()));
+ BSONObj expectedSort = BSON("b" << 1);
+ ASSERT_EQUALS(0, expectedSort.woCompare(qr->getSort()));
+ BSONObj expectedProj = BSON("c" << 1);
+ ASSERT_EQUALS(0, expectedProj.woCompare(qr->getProj()));
+ BSONObj expectedHint = BSON("d" << 1);
+ ASSERT_EQUALS(0, expectedHint.woCompare(qr->getHint()));
+ BSONObj expectedReadConcern = BSON("e" << 1);
+ ASSERT_EQUALS(0, expectedReadConcern.woCompare(qr->getReadConcern()));
+ BSONObj expectedCollation = BSON("f" << 1);
+ ASSERT_EQUALS(0, expectedCollation.woCompare(qr->getCollation()));
+ ASSERT_EQUALS(3, *qr->getLimit());
+ ASSERT_EQUALS(5, *qr->getSkip());
+ ASSERT_EQUALS(90, *qr->getBatchSize());
+ ASSERT(qr->wantMore());
+}
+
+TEST(QueryRequestTest, ParseFromCommandLargeLimit) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "limit: 8000000000}"); // 8 * 1000 * 1000 * 1000
+ const NamespaceString nss("test.testns");
+ const bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *qr->getLimit());
+}
+
+TEST(QueryRequestTest, ParseFromCommandLargeBatchSize) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "batchSize: 8000000000}"); // 8 * 1000 * 1000 * 1000
+ const NamespaceString nss("test.testns");
+ const bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *qr->getBatchSize());
+}
+
+TEST(QueryRequestTest, ParseFromCommandLargeSkip) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "skip: 8000000000}"); // 8 * 1000 * 1000 * 1000
+ const NamespaceString nss("test.testns");
+ const bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *qr->getSkip());
+}
+
+//
+// Parsing errors where a field has the wrong type.
+//
+
+TEST(QueryRequestTest, ParseFromCommandQueryWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSortWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "sort: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandProjWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "projection: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSkipWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "skip: '5',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandLimitWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "limit: '5',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSingleBatchWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "singleBatch: 'false',"
+ "projection: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandCommentWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "comment: 1}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandMaxScanWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "maxScan: true,"
+ "comment: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandMaxTimeMSWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "maxTimeMS: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandMaxWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "max: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandMinWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "min: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandReturnKeyWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "returnKey: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandShowRecordIdWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "showRecordId: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSnapshotWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "snapshot: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandTailableWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "tailable: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSlaveOkWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "slaveOk: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandOplogReplayWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "oplogReplay: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandNoCursorTimeoutWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "noCursorTimeout: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandAwaitDataWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "tailable: true,"
+ "awaitData: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandExhaustWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "exhaust: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandPartialWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "allowPartialResults: 3}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandReadConcernWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "readConcern: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandCollationWrongType) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "filter: {a: 1},"
+ "collation: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+//
+// Parsing errors where a field has the right type but a bad value.
+//
+
+TEST(QueryRequestTest, ParseFromCommandNegativeSkipError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "skip: -3,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSkipIsZero) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "skip: 0,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ ASSERT_EQ(BSON("a" << 3), qr->getFilter());
+ ASSERT_FALSE(qr->getSkip());
+}
+
+TEST(QueryRequestTest, ParseFromCommandNegativeLimitError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "limit: -3,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandLimitIsZero) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "limit: 0,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ ASSERT_EQ(BSON("a" << 3), qr->getFilter());
+ ASSERT_FALSE(qr->getLimit());
+}
+
+TEST(QueryRequestTest, ParseFromCommandNegativeBatchSizeError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "batchSize: -10,"
+ "filter: {a: 3}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandBatchSizeZero) {
+ BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(qr->getBatchSize());
+ ASSERT_EQ(0, *qr->getBatchSize());
+
+ ASSERT(!qr->getLimit());
+}
+
+TEST(QueryRequestTest, ParseFromCommandDefaultBatchSize) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT(!qr->getBatchSize());
+ ASSERT(!qr->getLimit());
+}
+
+//
+// Errors checked in QueryRequest::validate().
+//
+
+TEST(QueryRequestTest, ParseFromCommandMinMaxDifferentFieldsError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "min: {a: 3},"
+ "max: {b: 4}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSnapshotPlusSortError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "sort: {a: 3},"
+ "snapshot: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandSnapshotPlusHintError) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "hint: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseCommandForbidNonMetaSortOnFieldWithMetaProject) {
+ BSONObj cmdObj;
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: {$meta: 'textScore'}},"
+ "sort: {b: 1}}");
+ ASSERT_OK(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+}
+
+TEST(QueryRequestTest, ParseCommandForbidMetaSortOnFieldWithoutMetaProject) {
+ BSONObj cmdObj;
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {a: 1},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+
+ cmdObj = fromjson(
+ "{find: 'testns',"
+ "projection: {b: 1},"
+ "sort: {a: {$meta: 'textScore'}}}");
+ result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseCommandForbidExhaust) {
+ BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseCommandIsFromFindCommand) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ ASSERT_FALSE(qr->getNToReturn());
+}
+
+TEST(QueryRequestTest, ParseCommandAwaitDataButNotTailable) {
+ const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseCommandFirstFieldNotString) {
+ BSONObj cmdObj = fromjson("{find: 1}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseCommandIgnoreShardVersionField) {
+ BSONObj cmdObj = fromjson("{find: 'test.testns', shardVersion: 'foo'}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, DefaultQueryParametersCorrect) {
+ BSONObj cmdObj = fromjson("{find: 'testns'}");
+
+ const NamespaceString nss("test.testns");
+ std::unique_ptr<QueryRequest> qr(
+ assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, false)));
+
+ ASSERT_FALSE(qr->getSkip());
+ ASSERT_FALSE(qr->getLimit());
+
+ ASSERT_EQUALS(true, qr->wantMore());
+ ASSERT_FALSE(qr->getNToReturn());
+ ASSERT_EQUALS(false, qr->isExplain());
+ ASSERT_EQUALS(0, qr->getMaxScan());
+ ASSERT_EQUALS(0, qr->getMaxTimeMS());
+ ASSERT_EQUALS(false, qr->returnKey());
+ ASSERT_EQUALS(false, qr->showRecordId());
+ ASSERT_EQUALS(false, qr->isSnapshot());
+ ASSERT_EQUALS(false, qr->hasReadPref());
+ ASSERT_EQUALS(false, qr->isTailable());
+ ASSERT_EQUALS(false, qr->isSlaveOk());
+ ASSERT_EQUALS(false, qr->isOplogReplay());
+ ASSERT_EQUALS(false, qr->isNoCursorTimeout());
+ ASSERT_EQUALS(false, qr->isAwaitData());
+ ASSERT_EQUALS(false, qr->isExhaust());
+ ASSERT_EQUALS(false, qr->isAllowPartialResults());
+}
+
+//
+// Extra fields cause the parse to fail.
+//
+
+TEST(QueryRequestTest, ParseFromCommandForbidExtraField) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "foo: {a: 1}}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseFromCommandForbidExtraOption) {
+ BSONObj cmdObj = fromjson(
+ "{find: 'testns',"
+ "snapshot: true,"
+ "foo: true}");
+ const NamespaceString nss("test.testns");
+ bool isExplain = false;
+ auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ ASSERT_NOT_OK(result.getStatus());
+}
+
+TEST(QueryRequestTest, ParseMaxTimeMSStringValueFails) {
+ BSONObj maxTimeObj = BSON(QueryRequest::cmdOptionMaxTimeMS << "foo");
+ ASSERT_NOT_OK(QueryRequest::parseMaxTimeMS(maxTimeObj[QueryRequest::cmdOptionMaxTimeMS]));
+}
+
+TEST(QueryRequestTest, ParseMaxTimeMSNonIntegralValueFails) {
+ BSONObj maxTimeObj = BSON(QueryRequest::cmdOptionMaxTimeMS << 100.3);
+ ASSERT_NOT_OK(QueryRequest::parseMaxTimeMS(maxTimeObj[QueryRequest::cmdOptionMaxTimeMS]));
+}
+
+TEST(QueryRequestTest, ParseMaxTimeMSOutOfRangeDoubleFails) {
+ BSONObj maxTimeObj = BSON(QueryRequest::cmdOptionMaxTimeMS << 1e200);
+ ASSERT_NOT_OK(QueryRequest::parseMaxTimeMS(maxTimeObj[QueryRequest::cmdOptionMaxTimeMS]));
+}
+
+TEST(QueryRequestTest, ParseMaxTimeMSNegativeValueFails) {
+ BSONObj maxTimeObj = BSON(QueryRequest::cmdOptionMaxTimeMS << -400);
+ ASSERT_NOT_OK(QueryRequest::parseMaxTimeMS(maxTimeObj[QueryRequest::cmdOptionMaxTimeMS]));
+}
+
+TEST(QueryRequestTest, ParseMaxTimeMSZeroSucceeds) {
+ BSONObj maxTimeObj = BSON(QueryRequest::cmdOptionMaxTimeMS << 0);
+ auto maxTime = QueryRequest::parseMaxTimeMS(maxTimeObj[QueryRequest::cmdOptionMaxTimeMS]);
+ ASSERT_OK(maxTime);
+ ASSERT_EQ(maxTime.getValue(), 0);
+}
+
+TEST(QueryRequestTest, ParseMaxTimeMSPositiveInRangeSucceeds) {
+ BSONObj maxTimeObj = BSON(QueryRequest::cmdOptionMaxTimeMS << 300);
+ auto maxTime = QueryRequest::parseMaxTimeMS(maxTimeObj[QueryRequest::cmdOptionMaxTimeMS]);
+ ASSERT_OK(maxTime);
+ ASSERT_EQ(maxTime.getValue(), 300);
+}
+
+} // namespace
+} // namespace mongo
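
The parseMaxTimeMS tests at the end of the new file pin down the accepted values (non-negative integers within range); the same contract in a standalone sketch, mirroring the assertions above:

    BSONObj obj = BSON(QueryRequest::cmdOptionMaxTimeMS << 300);
    StatusWith<int> parsed = QueryRequest::parseMaxTimeMS(obj[QueryRequest::cmdOptionMaxTimeMS]);
    invariant(parsed.isOK() && parsed.getValue() == 300);  // 0 and positive in-range values succeed
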
diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp
index 487f34ebcf9..385a4eec4c8 100644
--- a/src/mongo/db/query/query_settings.cpp
+++ b/src/mongo/db/query/query_settings.cpp
@@ -119,10 +119,10 @@ std::vector<AllowedIndexEntry*> QuerySettings::getAllAllowedIndices() const {
void QuerySettings::setAllowedIndices(const CanonicalQuery& canonicalQuery,
const PlanCacheKey& key,
const std::vector<BSONObj>& indexes) {
- const LiteParsedQuery& lpq = canonicalQuery.getParsed();
- const BSONObj& query = lpq.getFilter();
- const BSONObj& sort = lpq.getSort();
- const BSONObj& projection = lpq.getProj();
+ const QueryRequest& qr = canonicalQuery.getQueryRequest();
+ const BSONObj& query = qr.getFilter();
+ const BSONObj& sort = qr.getSort();
+ const BSONObj& projection = qr.getProj();
AllowedIndexEntry* entry = new AllowedIndexEntry(query, sort, projection, indexes);
stdx::lock_guard<stdx::mutex> cacheLock(_mutex);
diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp
index 85da961f66c..49a767e3b2d 100644
--- a/src/mongo/db/ttl.cpp
+++ b/src/mongo/db/ttl.cpp
@@ -288,10 +288,10 @@ private:
const char* keyFieldName = key.firstElement().fieldName();
BSONObj query =
BSON(keyFieldName << BSON("$gte" << kDawnOfTime << "$lte" << expirationTime));
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto canonicalQuery = CanonicalQuery::canonicalize(
- txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
invariantOK(canonicalQuery.getStatus());
DeleteStageParams params;
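
Every remaining dbtests call-site change below follows the same mechanical shape as the ttl.cpp hunk above; roughly (txn and nss stand in for whatever the surrounding test already provides):

    auto qr = stdx::make_unique<QueryRequest>(nss);
    qr->setFilter(BSON("a" << 1));  // filter varies per test
    auto statusWithCQ = CanonicalQuery::canonicalize(
        txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
    ASSERT_OK(statusWithCQ.getStatus());
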
diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp
index c8f9bca5e29..5d18e0c4bb1 100644
--- a/src/mongo/dbtests/documentsourcetests.cpp
+++ b/src/mongo/dbtests/documentsourcetests.cpp
@@ -96,12 +96,12 @@ protected:
OldClientWriteContext ctx(&_opCtx, nss.ns());
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
if (hint) {
- lpq->setHint(*hint);
+ qr->setHint(*hint);
}
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(
- &_opCtx, std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions()));
_exec = uassertStatusOK(
getExecutor(&_opCtx, ctx.getCollection(), std::move(cq), PlanExecutor::YIELD_MANUAL));
diff --git a/src/mongo/dbtests/executor_registry.cpp b/src/mongo/dbtests/executor_registry.cpp
index dd15521cfc2..c6fb21f1b1d 100644
--- a/src/mongo/dbtests/executor_registry.cpp
+++ b/src/mongo/dbtests/executor_registry.cpp
@@ -77,9 +77,9 @@ public:
unique_ptr<CollectionScan> scan(new CollectionScan(&_opCtx, params, ws.get(), NULL));
// Create a plan executor to hold it
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_opCtx, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ &_opCtx, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/dbtests/oplogstarttests.cpp b/src/mongo/dbtests/oplogstarttests.cpp
index 4a01d5ee9b2..5dedeac260a 100644
--- a/src/mongo/dbtests/oplogstarttests.cpp
+++ b/src/mongo/dbtests/oplogstarttests.cpp
@@ -75,10 +75,10 @@ protected:
}
void setupFromQuery(const BSONObj& query) {
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
_cq = std::move(statusWithCQ.getValue());
_oplogws.reset(new WorkingSet());
diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp
index 6acaffe12ff..4d91206d198 100644
--- a/src/mongo/dbtests/plan_ranking.cpp
+++ b/src/mongo/dbtests/plan_ranking.cpp
@@ -198,10 +198,10 @@ public:
// Run the query {a:100, b:1}.
{
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 100 << "b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 100 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
cq = std::move(statusWithCQ.getValue());
ASSERT(cq.get());
@@ -218,10 +218,10 @@ public:
// And run the same query again.
{
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 100 << "b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 100 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
cq = std::move(statusWithCQ.getValue());
}
@@ -254,10 +254,10 @@ public:
addIndex(BSON("b" << 1));
// Run the query {a:1, b:{$gt:1}}.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 1 << "b" << BSON("$gt" << 1)));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 1 << "b" << BSON("$gt" << 1)));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -295,11 +295,11 @@ public:
addIndex(BSON("a" << 1 << "b" << 1));
// Query for a==27 with projection that wants 'a' and 'b'.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 27));
- lpq->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 27));
+ qr->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -331,10 +331,10 @@ public:
addIndex(BSON("b" << 1));
// There is no data that matches this query but we don't know that until EOF.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 1 << "b" << 1 << "c" << 99));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 1 << "b" << 1 << "c" << 99));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -369,12 +369,12 @@ public:
// There is no data that matches this query ({a:2}). Both scans will hit EOF before
// returning any data.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 2));
- lpq->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 2));
+ qr->setProj(BSON("_id" << 0 << "a" << 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -405,10 +405,10 @@ public:
addIndex(BSON("b" << 1));
// Run the query {a:N+1, b:1}. (No such document.)
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << N + 1 << "b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << N + 1 << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -442,10 +442,10 @@ public:
addIndex(BSON("b" << 1));
// Run the query {a:N+1, b:1}. (No such document.)
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << BSON("$gte" << N + 1) << "b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << BSON("$gte" << N + 1) << "b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -472,11 +472,11 @@ public:
// Run a query with a sort. The blocking sort won't produce any data during the
// evaluation period.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("_id" << BSON("$gte" << 20 << "$lte" << 200)));
- lpq->setSort(BSON("c" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("_id" << BSON("$gte" << 20 << "$lte" << 200)));
+ qr->setSort(BSON("c" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -503,10 +503,10 @@ public:
}
// Look for A Space Odyssey.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("foo" << 2001));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("foo" << 2001));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -537,11 +537,11 @@ public:
addIndex(BSON("d" << 1 << "e" << 1));
// Query: find({a: 1}).sort({d: 1})
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 1));
- lpq->setSort(BSON("d" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 1));
+ qr->setSort(BSON("d" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -576,10 +576,10 @@ public:
// Solutions using either 'a' or 'b' will take a long time to start producing
// results. However, an index scan on 'b' will start producing results sooner
// than an index scan on 'a'.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: 1, b: 1, c: {$gte: 5000}}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: 1, b: 1, c: {$gte: 5000}}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -609,10 +609,10 @@ public:
addIndex(BSON("b" << 1 << "c" << 1));
addIndex(BSON("a" << 1));
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: 9, b: {$ne: 10}, c: 9}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: 9, b: {$ne: 10}, c: 9}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp
index 339be430f62..b06793e3b3f 100644
--- a/src/mongo/dbtests/query_plan_executor.cpp
+++ b/src/mongo/dbtests/query_plan_executor.cpp
@@ -102,10 +102,10 @@ public:
unique_ptr<WorkingSet> ws(new WorkingSet());
// Canonicalize the query.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(filterObj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(filterObj);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
@@ -149,9 +149,9 @@ public:
IndexScan* ix = new IndexScan(&_txn, ixparams, ws.get(), NULL);
unique_ptr<PlanStage> root(new FetchStage(&_txn, ws.get(), ix, NULL, coll));
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp
index f090e61f8c4..47f73275a45 100644
--- a/src/mongo/dbtests/query_stage_cached_plan.cpp
+++ b/src/mongo/dbtests/query_stage_cached_plan.cpp
@@ -120,10 +120,10 @@ public:
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -188,10 +188,10 @@ public:
ASSERT(collection);
// Query can be answered by either index on "a" or index on "b".
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{a: {$gte: 8}, b: 1}"));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
const std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp
index d7f3fc5d655..65f47ba7429 100644
--- a/src/mongo/dbtests/query_stage_delete.cpp
+++ b/src/mongo/dbtests/query_stage_delete.cpp
@@ -104,10 +104,10 @@ public:
}
unique_ptr<CanonicalQuery> canonicalize(const BSONObj& query) {
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index a76bfbe22c1..a5b4d000a1e 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -168,10 +168,10 @@ public:
new CollectionScan(&_txn, csparams, sharedWs.get(), filter.get()));
// Hand the plans off to the MPS.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("foo" << 7));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("foo" << 7));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
verify(NULL != cq.get());
@@ -226,11 +226,11 @@ public:
Collection* collection = ctx.getCollection();
// Query for both 'a' and 'b' and sort on 'b'.
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("a" << 1 << "b" << 1));
- lpq->setSort(BSON("b" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("a" << 1 << "b" << 1));
+ qr->setSort(BSON("b" << 1));
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
verify(statusWithCQ.isOK());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
ASSERT(NULL != cq.get());
@@ -332,10 +332,10 @@ public:
AutoGetCollectionForRead ctx(&_txn, nss.ns());
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("x" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("x" << 1));
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
unique_ptr<MultiPlanStage> mps =
make_unique<MultiPlanStage>(&_txn, ctx.getCollection(), cq.get());
@@ -408,10 +408,10 @@ public:
Collection* coll = ctx.getCollection();
// Create the executor (Matching all documents).
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(BSON("foo" << BSON("$gte" << 0)));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(BSON("foo" << BSON("$gte" << 0)));
auto cq = uassertStatusOK(CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
auto exec =
uassertStatusOK(getExecutor(&_txn, coll, std::move(cq), PlanExecutor::YIELD_MANUAL));
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 36a6e9f502b..04fdbb1c6b2 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -77,11 +77,10 @@ protected:
BSONObj cmdObj = fromjson(findCmd);
bool isExplain = false;
- auto lpq =
- unittest::assertGet(LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain));
+ auto qr = unittest::assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain));
auto cq = unittest::assertGet(
- CanonicalQuery::canonicalize(txn(), std::move(lpq), ExtensionsCallbackNoop()));
+ CanonicalQuery::canonicalize(txn(), std::move(qr), ExtensionsCallbackNoop()));
return cq;
}
@@ -114,10 +113,10 @@ public:
"{$or: [{a: {$geoWithin: {$centerSphere: [[0,0],10]}}},"
"{a: {$geoWithin: {$centerSphere: [[1,1],10]}}}]}");
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -160,10 +159,10 @@ public:
Collection* collection = ctx.getCollection();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -218,10 +217,10 @@ public:
Collection* collection = ctx.getCollection();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -277,10 +276,10 @@ public:
Collection* collection = ctx.getCollection();
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
std::unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
@@ -533,10 +532,10 @@ public:
insert(BSON("_id" << 3 << "a" << 1 << "c" << 3));
insert(BSON("_id" << 4 << "a" << 1 << "c" << 4));
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto cq = unittest::assertGet(CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
Collection* collection = ctx.getCollection();
@@ -592,11 +591,11 @@ public:
insert(BSON("_id" << 3 << "a" << 3));
insert(BSON("_id" << 4));
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(fromjson("{$or: [{a: 1}, {a: {$ne:1}}]}"));
- lpq->setSort(BSON("d" << 1));
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(fromjson("{$or: [{a: 1}, {a: {$ne:1}}]}"));
+ qr->setSort(BSON("d" << 1));
auto cq = unittest::assertGet(CanonicalQuery::canonicalize(
- txn(), std::move(lpq), ExtensionsCallbackDisallowExtensions()));
+ txn(), std::move(qr), ExtensionsCallbackDisallowExtensions()));
Collection* collection = ctx.getCollection();
diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp
index cc551dac61b..8c2897d7ac1 100644
--- a/src/mongo/dbtests/query_stage_update.cpp
+++ b/src/mongo/dbtests/query_stage_update.cpp
@@ -88,10 +88,10 @@ public:
}
unique_ptr<CanonicalQuery> canonicalize(const BSONObj& query) {
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
auto statusWithCQ = CanonicalQuery::canonicalize(
- &_txn, std::move(lpq), ExtensionsCallbackDisallowExtensions());
+ &_txn, std::move(qr), ExtensionsCallbackDisallowExtensions());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 46cc769b1d1..9523e4e5820 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -43,7 +43,6 @@
#include "mongo/db/json.h"
#include "mongo/db/lasterror.h"
#include "mongo/db/query/find.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/db/service_context.h"
#include "mongo/db/service_context_d.h"
#include "mongo/dbtests/dbtests.h"
diff --git a/src/mongo/executor/SConscript b/src/mongo/executor/SConscript
index b138a11f1e1..ff6ff911da8 100644
--- a/src/mongo/executor/SConscript
+++ b/src/mongo/executor/SConscript
@@ -319,7 +319,7 @@ env.Library(
'$BUILD_DIR/mongo/client/client_query',
'$BUILD_DIR/mongo/db/query/command_request_response',
- '$BUILD_DIR/mongo/db/query/lite_parsed_query',
+ '$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/rpc/protocol',
'$BUILD_DIR/mongo/rpc/metadata',
'$BUILD_DIR/mongo/util/net/network',
diff --git a/src/mongo/executor/downconvert_find_and_getmore_commands.cpp b/src/mongo/executor/downconvert_find_and_getmore_commands.cpp
index 667c955988f..df3492d15f1 100644
--- a/src/mongo/executor/downconvert_find_and_getmore_commands.cpp
+++ b/src/mongo/executor/downconvert_find_and_getmore_commands.cpp
@@ -143,44 +143,44 @@ StatusWith<Message> downconvertFindCommandRequest(const RemoteCommandRequest& re
const std::string& ns = nss.ns();
- // It is a little heavy handed to use LiteParsedQuery to convert the command object to
+ // It is a little heavy handed to use QueryRequest to convert the command object to
// query() arguments but we get validation and consistent behavior with the find
// command implementation on the remote server.
- auto lpqStatus = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, false);
- if (!lpqStatus.isOK()) {
- return lpqStatus.getStatus();
+ auto qrStatus = QueryRequest::makeFromFindCommand(nss, cmdObj, false);
+ if (!qrStatus.isOK()) {
+ return qrStatus.getStatus();
}
- auto lpq = std::move(lpqStatus.getValue());
+ auto qr = std::move(qrStatus.getValue());
// We are downconverting a find command, and find command can only have ntoreturn
// if it was generated by mongos.
- invariant(!lpq->getNToReturn());
- Query query(lpq->getFilter());
- if (!lpq->getSort().isEmpty()) {
- query.sort(lpq->getSort());
+ invariant(!qr->getNToReturn());
+ Query query(qr->getFilter());
+ if (!qr->getSort().isEmpty()) {
+ query.sort(qr->getSort());
}
- if (!lpq->getHint().isEmpty()) {
- query.hint(lpq->getHint());
+ if (!qr->getHint().isEmpty()) {
+ query.hint(qr->getHint());
}
- if (!lpq->getMin().isEmpty()) {
- query.minKey(lpq->getMin());
+ if (!qr->getMin().isEmpty()) {
+ query.minKey(qr->getMin());
}
- if (!lpq->getMax().isEmpty()) {
- query.minKey(lpq->getMax());
+ if (!qr->getMax().isEmpty()) {
+ query.maxKey(qr->getMax());
}
- if (lpq->isExplain()) {
+ if (qr->isExplain()) {
query.explain();
}
- if (lpq->isSnapshot()) {
+ if (qr->isSnapshot()) {
query.snapshot();
}
- const int nToReturn = lpq->getLimit().value_or(0) * -1;
- const int nToSkip = lpq->getSkip().value_or(0);
- const BSONObj* fieldsToReturn = &lpq->getProj();
- int queryOptions = lpq->getOptions(); // non-const so we can set slaveOk if we need to
- const int batchSize = lpq->getBatchSize().value_or(0);
+ const int nToReturn = qr->getLimit().value_or(0) * -1;
+ const int nToSkip = qr->getSkip().value_or(0);
+ const BSONObj* fieldsToReturn = &qr->getProj();
+ int queryOptions = qr->getOptions(); // non-const so we can set slaveOk if we need to
+ const int batchSize = qr->getBatchSize().value_or(0);
const int nextBatchSize = [batchSize, nToReturn]() {
if (nToReturn == 0)
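One detail worth spelling out from the hunk above: the limit is negated when it becomes the legacy nToReturn. In the OP_QUERY wire protocol a negative numberToReturn asks the server to return at most that many documents and then close the cursor, which matches the find command's limit semantics. A brief annotated sketch (the two assignments are the ones shown above; the comment is editorial):
    // A find limit of 5 is forwarded as ntoreturn == -5: negative ntoreturn in the
    // legacy protocol means "return at most |n| documents and close the cursor".
    const int nToReturn = qr->getLimit().value_or(0) * -1;
    const int nToSkip = qr->getSkip().value_or(0);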
diff --git a/src/mongo/executor/network_interface_asio_operation.cpp b/src/mongo/executor/network_interface_asio_operation.cpp
index 1fe4f33b88d..fdce0f3c0d4 100644
--- a/src/mongo/executor/network_interface_asio_operation.cpp
+++ b/src/mongo/executor/network_interface_asio_operation.cpp
@@ -34,7 +34,7 @@
#include "mongo/base/status_with.h"
#include "mongo/db/query/getmore_request.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/executor/async_stream_interface.h"
#include "mongo/executor/connection_pool_asio.h"
#include "mongo/executor/downconvert_find_and_getmore_commands.h"
@@ -200,7 +200,7 @@ Status NetworkInterfaceASIO::AsyncOp::beginCommand(const RemoteCommandRequest& r
rpc::EgressMetadataHook* metadataHook) {
// Check if we need to downconvert find or getMore commands.
StringData commandName = request.cmdObj.firstElement().fieldNameStringData();
- const auto isFindCmd = commandName == LiteParsedQuery::kFindCommandName;
+ const auto isFindCmd = commandName == QueryRequest::kFindCommandName;
const auto isGetMoreCmd = commandName == GetMoreRequest::kGetMoreCommandName;
const auto isFindOrGetMoreCmd = isFindCmd || isGetMoreCmd;
diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript
index edf38cbde4e..74970615d7b 100644
--- a/src/mongo/s/SConscript
+++ b/src/mongo/s/SConscript
@@ -58,8 +58,8 @@ env.Library(
'$BUILD_DIR/mongo/db/common',
'$BUILD_DIR/mongo/db/matcher/expressions',
'$BUILD_DIR/mongo/db/ops/update_common',
- '$BUILD_DIR/mongo/db/query/lite_parsed_query',
'$BUILD_DIR/mongo/db/query/query_planner',
+ '$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/repl/optime',
'$BUILD_DIR/mongo/rpc/metadata',
]
diff --git a/src/mongo/s/balancer/balancer.cpp b/src/mongo/s/balancer/balancer.cpp
index d4d463ace64..d991be6eb83 100644
--- a/src/mongo/s/balancer/balancer.cpp
+++ b/src/mongo/s/balancer/balancer.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/client.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/s/balancer/balancer_chunk_selection_policy_impl.h"
#include "mongo/s/balancer/balancer_configuration.h"
#include "mongo/s/balancer/cluster_statistics_impl.h"
@@ -164,7 +164,7 @@ void appendOperationDeadlineIfSet(OperationContext* txn, BSONObjBuilder* cmdBuil
if (!maxTimeMsArg) {
return;
}
- cmdBuilder->append(LiteParsedQuery::cmdOptionMaxTimeMS, *maxTimeMsArg);
+ cmdBuilder->append(QueryRequest::cmdOptionMaxTimeMS, *maxTimeMsArg);
}
/**
diff --git a/src/mongo/s/balancer/balancer_configuration_test.cpp b/src/mongo/s/balancer/balancer_configuration_test.cpp
index 5119b5180b7..217df4c5196 100644
--- a/src/mongo/s/balancer/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer/balancer_configuration_test.cpp
@@ -34,7 +34,7 @@
#include "mongo/bson/bsonmisc.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
@@ -71,8 +71,7 @@ protected:
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), "config.settings");
- auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), "config.settings");
ASSERT_EQ(query->getFilter(), BSON("_id" << key));
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
index 4bbdb9bcebd..54f414c79cb 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_add_shard_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/client/remote_command_targeter_factory_mock.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/replset/catalog_manager_replica_set.h"
@@ -189,7 +189,7 @@ protected:
ASSERT_EQ(nss.toString(), DatabaseType::ConfigNS);
auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), DatabaseType::ConfigNS);
ASSERT_EQ(query->getFilter(), BSON(DatabaseType::name(dbname)));
@@ -338,7 +338,7 @@ TEST_F(AddShardTest, StandaloneGenerateName) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.toString(), ShardType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ShardType::ConfigNS);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
index 01d83c3bf14..8f5b49aab7e 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_drop_coll_test.cpp
@@ -33,7 +33,6 @@
#include "mongo/bson/json.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/rpc/metadata/repl_set_metadata.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/catalog/dist_lock_manager_mock.h"
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
index c359b3f2874..8d3f9c1f21e 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_remove_shard_test.cpp
@@ -183,7 +183,7 @@ TEST_F(RemoveShardTest, RemoveShardStartDraining) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
ASSERT_EQ(BSONObj(), query->getFilter());
@@ -355,7 +355,7 @@ TEST_F(RemoveShardTest, RemoveShardCompletion) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
ASSERT_EQ(BSONObj(), query->getFilter());
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
index a87244bf615..cd41e90c3ed 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_shard_collection_test.cpp
@@ -98,8 +98,7 @@ public:
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(DatabaseType::ConfigNS, nss.ns());
- auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(DatabaseType::ConfigNS, query->ns());
ASSERT_EQ(BSON(DatabaseType::name(expectedDb.getName())), query->getFilter());
@@ -163,8 +162,7 @@ public:
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
- auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
BSONObj expectedQuery =
BSON(ChunkType::ns(ns) << ChunkType::DEPRECATED_lastmod << GTE << Timestamp());
BSONObj expectedSort = BSON(ChunkType::DEPRECATED_lastmod() << 1);
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
index 574a309c63b..b45914dfeba 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/bson/json.h"
#include "mongo/client/remote_command_targeter_mock.h"
#include "mongo/db/commands.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"
@@ -99,30 +99,30 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionExisting) {
catalogManager()->getCollection(operationContext(), expectedColl.getNs().ns()));
});
- onFindWithMetadataCommand([this, &expectedColl, newOpTime](
- const RemoteCommandRequest& request) {
+ onFindWithMetadataCommand(
+ [this, &expectedColl, newOpTime](const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
+ const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
+ ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- // Ensure the query is correct
- ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSON(CollectionType::fullNs(expectedColl.getNs().ns())));
- ASSERT_EQ(query->getSort(), BSONObj());
- ASSERT_EQ(query->getLimit().get(), 1);
+ // Ensure the query is correct
+ ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(query->getFilter(), BSON(CollectionType::fullNs(expectedColl.getNs().ns())));
+ ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_EQ(query->getLimit().get(), 1);
- checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
+ checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
- ReplSetMetadata metadata(10, OpTime(), newOpTime, 100, OID(), 30, -1);
- BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ ReplSetMetadata metadata(10, OpTime(), newOpTime, 100, OID(), 30, -1);
+ BSONObjBuilder builder;
+ metadata.writeToMetadata(&builder);
- return std::make_tuple(vector<BSONObj>{expectedColl.toBSON()}, builder.obj());
- });
+ return std::make_tuple(vector<BSONObj>{expectedColl.toBSON()}, builder.obj());
+ });
// Now wait for the getCollection call to return
const auto collOpTimePair = future.timed_get(kFutureTimeout);
@@ -170,7 +170,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabaseExisting) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), DatabaseType::ConfigNS);
ASSERT_EQ(query->getFilter(), BSON(DatabaseType::name(expectedDb.getName())));
@@ -399,7 +399,7 @@ TEST_F(CatalogManagerReplSetTest, GetAllShardsValid) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ShardType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ShardType::ConfigNS);
ASSERT_EQ(query->getFilter(), BSONObj());
@@ -494,7 +494,7 @@ TEST_F(CatalogManagerReplSetTest, GetChunksForNSWithSortAndLimit) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ChunkType::ConfigNS);
ASSERT_EQ(query->getFilter(), chunksQuery);
@@ -541,7 +541,7 @@ TEST_F(CatalogManagerReplSetTest, GetChunksForNSNoSortNoLimit) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), ChunkType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), ChunkType::ConfigNS);
ASSERT_EQ(query->getFilter(), chunksQuery);
@@ -883,28 +883,28 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionsValidResultsNoDb) {
return collections;
});
- onFindWithMetadataCommand([this, coll1, coll2, coll3, newOpTime](
- const RemoteCommandRequest& request) {
- ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
+ onFindWithMetadataCommand(
+ [this, coll1, coll2, coll3, newOpTime](const RemoteCommandRequest& request) {
+ ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
+ const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
+ ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
- ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
- ASSERT_EQ(query->getFilter(), BSONObj());
- ASSERT_EQ(query->getSort(), BSONObj());
+ ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
+ ASSERT_EQ(query->getFilter(), BSONObj());
+ ASSERT_EQ(query->getSort(), BSONObj());
- checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
+ checkReadConcern(request.cmdObj, Timestamp(0, 0), repl::OpTime::kUninitializedTerm);
- ReplSetMetadata metadata(10, OpTime(), newOpTime, 100, OID(), 30, -1);
- BSONObjBuilder builder;
- metadata.writeToMetadata(&builder);
+ ReplSetMetadata metadata(10, OpTime(), newOpTime, 100, OID(), 30, -1);
+ BSONObjBuilder builder;
+ metadata.writeToMetadata(&builder);
- return std::make_tuple(vector<BSONObj>{coll1.toBSON(), coll2.toBSON(), coll3.toBSON()},
- builder.obj());
- });
+ return std::make_tuple(vector<BSONObj>{coll1.toBSON(), coll2.toBSON(), coll3.toBSON()},
+ builder.obj());
+ });
const auto& actualColls = future.timed_get(kFutureTimeout);
ASSERT_EQ(3U, actualColls.size());
@@ -947,7 +947,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionsValidResultsWithDb) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), CollectionType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
{
@@ -995,7 +995,7 @@ TEST_F(CatalogManagerReplSetTest, GetCollectionsInvalidCollectionType) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), CollectionType::ConfigNS);
{
@@ -1041,7 +1041,7 @@ TEST_F(CatalogManagerReplSetTest, GetDatabasesForShardValid) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), DatabaseType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), DatabaseType::ConfigNS);
ASSERT_EQ(query->getFilter(), BSON(DatabaseType::primary(dbt1.getPrimary())));
@@ -1115,7 +1115,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagsForCollection) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(), BSON(TagsType::ns("TestDB.TestColl")));
@@ -1201,7 +1201,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkOneTagFound) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
@@ -1246,7 +1246,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkNoTagFound) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
@@ -1288,7 +1288,7 @@ TEST_F(CatalogManagerReplSetTest, GetTagForChunkInvalidTagDoc) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.ns(), TagsType::ConfigNS);
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(query->ns(), TagsType::ConfigNS);
ASSERT_EQ(query->getFilter(),
@@ -1517,7 +1517,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseSuccess) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
ASSERT_EQ(BSONObj(), query->getFilter());
@@ -1679,7 +1679,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDBExists) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(
@@ -1718,7 +1718,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDBExistsDifferentCase) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
BSONObjBuilder queryBuilder;
queryBuilder.appendRegex(
@@ -1765,7 +1765,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseNoShards) {
onFindCommand([this](const RemoteCommandRequest& request) {
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
ASSERT_EQ(BSONObj(), query->getFilter());
@@ -1804,7 +1804,7 @@ TEST_F(CatalogManagerReplSetTest, createDatabaseDuplicateKeyOnInsert) {
ASSERT_EQUALS(configHost, request.target);
ASSERT_EQUALS(kReplSecondaryOkMetadata, request.metadata);
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- auto query = assertGet(LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false));
+ auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
ASSERT_EQ(ShardType::ConfigNS, query->ns());
ASSERT_EQ(BSONObj(), query->getFilter());
@@ -1959,7 +1959,7 @@ TEST_F(CatalogManagerReplSetTest, EnableShardingNoDBExists) {
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(DatabaseType::ConfigNS, nss.toString());
- auto queryResult = LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false);
+ auto queryResult = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
ASSERT_OK(queryResult.getStatus());
const auto& query = queryResult.getValue();
diff --git a/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp b/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
index 27554868a9b..2f00e9533f6 100644
--- a/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
+++ b/src/mongo/s/catalog/replset/catalog_manager_replica_set_write_retry_test.cpp
@@ -164,7 +164,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMatch) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[1]);
auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
+ assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
ASSERT_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>{objToInsert};
@@ -199,7 +199,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorNotFound) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[1]);
auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
+ assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
ASSERT_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>();
@@ -234,7 +234,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMismatch) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[1]);
auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
+ assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
ASSERT_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>{BSON("_id" << 1 << "Value"
@@ -290,7 +290,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterWriteConcernFailureMatch) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[0]);
auto query =
- assertGet(LiteParsedQuery::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
+ assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
ASSERT_EQ(BSON("_id" << 1), query->getFilter());
return vector<BSONObj>{objToInsert};
diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp
index 5548ea4f642..c9f3af28227 100644
--- a/src/mongo/s/chunk_manager.cpp
+++ b/src/mongo/s/chunk_manager.cpp
@@ -483,10 +483,10 @@ shared_ptr<Chunk> ChunkManager::findIntersectingChunk(OperationContext* txn,
void ChunkManager::getShardIdsForQuery(OperationContext* txn,
const BSONObj& query,
set<ShardId>* shardIds) const {
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString(_ns));
- lpq->setFilter(query);
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString(_ns));
+ qr->setFilter(query);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), ExtensionsCallbackNoop());
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
uassertStatusOK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
diff --git a/src/mongo/s/chunk_manager_targeter_test.cpp b/src/mongo/s/chunk_manager_targeter_test.cpp
index 24d2398defa..14a346644eb 100644
--- a/src/mongo/s/chunk_manager_targeter_test.cpp
+++ b/src/mongo/s/chunk_manager_targeter_test.cpp
@@ -61,10 +61,10 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
BSONObj queryObj = fromjson(queryStr);
const NamespaceString nss("test.foo");
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(queryObj);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(queryObj);
auto statusWithCQ =
- CanonicalQuery::canonicalize(txn.get(), std::move(lpq), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(txn.get(), std::move(qr), ExtensionsCallbackNoop());
ASSERT_OK(statusWithCQ.getStatus());
return std::move(statusWithCQ.getValue());
}
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index 9a31ee60424..1ed6abe2d41 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -42,7 +42,7 @@
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/jsobj.h"
#include "mongo/db/operation_context.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
@@ -96,7 +96,7 @@ BSONObj appendMaxTimeToCmdObj(OperationContext* txn, const BSONObj& cmdObj) {
Milliseconds maxTime = kConfigCommandTimeout;
bool hasTxnMaxTime = txn->hasDeadline();
- bool hasUserMaxTime = !cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS].eoo();
+ bool hasUserMaxTime = !cmdObj[QueryRequest::cmdOptionMaxTimeMS].eoo();
if (hasTxnMaxTime) {
maxTime = std::min(maxTime, duration_cast<Milliseconds>(txn->getRemainingMaxTimeMicros()));
@@ -109,7 +109,7 @@ BSONObj appendMaxTimeToCmdObj(OperationContext* txn, const BSONObj& cmdObj) {
}
if (hasUserMaxTime) {
- Milliseconds userMaxTime(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS].numberLong());
+ Milliseconds userMaxTime(cmdObj[QueryRequest::cmdOptionMaxTimeMS].numberLong());
if (userMaxTime <= maxTime) {
return cmdObj;
}
@@ -118,7 +118,7 @@ BSONObj appendMaxTimeToCmdObj(OperationContext* txn, const BSONObj& cmdObj) {
BSONObjBuilder updatedCmdBuilder;
if (hasUserMaxTime) { // Need to remove user provided maxTimeMS.
BSONObjIterator cmdObjIter(cmdObj);
- const char* maxTimeFieldName = LiteParsedQuery::cmdOptionMaxTimeMS;
+ const char* maxTimeFieldName = QueryRequest::cmdOptionMaxTimeMS;
while (cmdObjIter.more()) {
BSONElement e = cmdObjIter.next();
if (str::equals(e.fieldName(), maxTimeFieldName)) {
@@ -130,7 +130,7 @@ BSONObj appendMaxTimeToCmdObj(OperationContext* txn, const BSONObj& cmdObj) {
updatedCmdBuilder.appendElements(cmdObj);
}
- updatedCmdBuilder.append(LiteParsedQuery::cmdOptionMaxTimeMS,
+ updatedCmdBuilder.append(QueryRequest::cmdOptionMaxTimeMS,
durationCount<Milliseconds>(maxTime));
return updatedCmdBuilder.obj();
}
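The maxTimeMS handling above is easier to follow outside the diff: compute the tighter of the config command timeout and the operation's remaining deadline, keep a user-supplied maxTimeMS only if it is already tighter, and otherwise rebuild the command with the clamped value. A sketch of the rebuild step, using the same names as the hunk (withClampedMaxTimeMS is a hypothetical helper, not part of this commit):
    BSONObj withClampedMaxTimeMS(const BSONObj& cmdObj, Milliseconds maxTime) {
        BSONObjBuilder updatedCmdBuilder;
        BSONObjIterator it(cmdObj);
        while (it.more()) {
            BSONElement e = it.next();
            // Copy every field through except a user-provided maxTimeMS.
            if (!str::equals(e.fieldName(), QueryRequest::cmdOptionMaxTimeMS)) {
                updatedCmdBuilder.append(e);
            }
        }
        updatedCmdBuilder.append(QueryRequest::cmdOptionMaxTimeMS,
                                 durationCount<Milliseconds>(maxTime));
        return updatedCmdBuilder.obj();
    }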
@@ -323,14 +323,14 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
bob.done().getObjectField(repl::ReadConcernArgs::kReadConcernFieldName).getOwned();
}
- auto lpq = stdx::make_unique<LiteParsedQuery>(nss);
- lpq->setFilter(query);
- lpq->setSort(sort);
- lpq->setReadConcern(readConcernObj);
- lpq->setLimit(limit);
+ auto qr = stdx::make_unique<QueryRequest>(nss);
+ qr->setFilter(query);
+ qr->setSort(sort);
+ qr->setReadConcern(readConcernObj);
+ qr->setLimit(limit);
BSONObjBuilder findCmdBuilder;
- lpq->asFindCommand(&findCmdBuilder);
+ qr->asFindCommand(&findCmdBuilder);
Microseconds maxTime = std::min(duration_cast<Microseconds>(kConfigCommandTimeout),
txn->getRemainingMaxTimeMicros());
@@ -340,8 +340,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
maxTime = Milliseconds{1};
}
- findCmdBuilder.append(LiteParsedQuery::cmdOptionMaxTimeMS,
- durationCount<Milliseconds>(maxTime));
+ findCmdBuilder.append(QueryRequest::cmdOptionMaxTimeMS, durationCount<Milliseconds>(maxTime));
Fetcher fetcher(Grid::get(txn)->getExecutorPool()->getFixedExecutor(),
host.getValue(),
diff --git a/src/mongo/s/commands/cluster_count_cmd.cpp b/src/mongo/s/commands/cluster_count_cmd.cpp
index 32306c3c7e2..4bb96e6b086 100644
--- a/src/mongo/s/commands/cluster_count_cmd.cpp
+++ b/src/mongo/s/commands/cluster_count_cmd.cpp
@@ -144,7 +144,7 @@ public:
}
const std::initializer_list<StringData> passthroughFields = {
- "hint", "$queryOptions", "readConcern", LiteParsedQuery::cmdOptionMaxTimeMS,
+ "hint", "$queryOptions", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
};
for (auto name : passthroughFields) {
if (auto field = cmdObj[name]) {
diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp
index 36fb17a0e61..e38a2d072b5 100644
--- a/src/mongo/s/commands/cluster_explain.cpp
+++ b/src/mongo/s/commands/cluster_explain.cpp
@@ -29,7 +29,6 @@
#include "mongo/platform/basic.h"
#include "mongo/bson/bsonmisc.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/commands/cluster_explain.h"
diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp
index a60bd3c197d..eddac77e971 100644
--- a/src/mongo/s/commands/cluster_explain_cmd.cpp
+++ b/src/mongo/s/commands/cluster_explain_cmd.cpp
@@ -31,7 +31,6 @@
#include "mongo/client/dbclientinterface.h"
#include "mongo/db/commands.h"
#include "mongo/db/query/explain.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
#include "mongo/s/query/cluster_find.h"
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index 2ff26bb1707..1bd898c2c37 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -111,15 +111,15 @@ public:
str::stream() << "Invalid collection name: " << nss.ns()};
}
- // Parse the command BSON to a LiteParsedQuery.
+ // Parse the command BSON to a QueryRequest.
bool isExplain = true;
- auto lpq = LiteParsedQuery::makeFromFindCommand(std::move(nss), cmdObj, isExplain);
- if (!lpq.isOK()) {
- return lpq.getStatus();
+ auto qr = QueryRequest::makeFromFindCommand(std::move(nss), cmdObj, isExplain);
+ if (!qr.isOK()) {
+ return qr.getStatus();
}
return Strategy::explainFind(
- txn, cmdObj, *lpq.getValue(), verbosity, serverSelectionMetadata, out);
+ txn, cmdObj, *qr.getValue(), verbosity, serverSelectionMetadata, out);
}
bool run(OperationContext* txn,
@@ -139,13 +139,13 @@ public:
}
const bool isExplain = false;
- auto lpq = LiteParsedQuery::makeFromFindCommand(nss, cmdObj, isExplain);
- if (!lpq.isOK()) {
- return appendCommandStatus(result, lpq.getStatus());
+ auto qr = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ if (!qr.isOK()) {
+ return appendCommandStatus(result, qr.getStatus());
}
auto cq =
- CanonicalQuery::canonicalize(txn, std::move(lpq.getValue()), ExtensionsCallbackNoop());
+ CanonicalQuery::canonicalize(txn, std::move(qr.getValue()), ExtensionsCallbackNoop());
if (!cq.isOK()) {
return appendCommandStatus(result, cq.getStatus());
}
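Both the explain and run paths above share the same two-step shape, which is the pattern this rename touches throughout mongos: parse the command object into a QueryRequest, then canonicalize it. A condensed sketch (parseAndCanonicalize is a hypothetical helper name; the calls are the ones used in the hunks):
    StatusWith<std::unique_ptr<CanonicalQuery>> parseAndCanonicalize(OperationContext* txn,
                                                                     NamespaceString nss,
                                                                     const BSONObj& cmdObj,
                                                                     bool isExplain) {
        auto qr = QueryRequest::makeFromFindCommand(std::move(nss), cmdObj, isExplain);
        if (!qr.isOK()) {
            return qr.getStatus();  // surface parse and validation errors to the caller
        }
        return CanonicalQuery::canonicalize(txn, std::move(qr.getValue()), ExtensionsCallbackNoop());
    }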
diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
index 352d5969927..cd4e887a979 100644
--- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
+++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp
@@ -94,7 +94,7 @@ BSONObj fixForShards(const BSONObj& orig,
if (fn == bypassDocumentValidationCommandOption() || fn == "map" || fn == "mapreduce" ||
fn == "mapReduce" || fn == "mapparams" || fn == "reduce" || fn == "query" ||
fn == "sort" || fn == "scope" || fn == "verbose" || fn == "$queryOptions" ||
- fn == "readConcern" || fn == LiteParsedQuery::cmdOptionMaxTimeMS) {
+ fn == "readConcern" || fn == QueryRequest::cmdOptionMaxTimeMS) {
b.append(e);
} else if (fn == "out" || fn == "finalize" || fn == "writeConcern") {
// We don't want to copy these
@@ -406,7 +406,7 @@ public:
BSONObj aggCounts = aggCountsB.done();
finalCmd.append("counts", aggCounts);
- if (auto elem = cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS])
+ if (auto elem = cmdObj[QueryRequest::cmdOptionMaxTimeMS])
finalCmd.append(elem);
if (auto elem = cmdObj[bypassDocumentValidationCommandOption()])
finalCmd.append(elem);
diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.cpp b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
index e4e3dde7fbf..8fb7a896f89 100644
--- a/src/mongo/s/commands/cluster_pipeline_cmd.cpp
+++ b/src/mongo/s/commands/cluster_pipeline_cmd.cpp
@@ -165,7 +165,7 @@ public:
}
const std::initializer_list<StringData> fieldsToPropagateToShards = {
- "$queryOptions", "readConcern", LiteParsedQuery::cmdOptionMaxTimeMS,
+ "$queryOptions", "readConcern", QueryRequest::cmdOptionMaxTimeMS,
};
for (auto&& field : fieldsToPropagateToShards) {
commandBuilder[field] = Value(cmdObj[field]);
@@ -225,9 +225,9 @@ public:
mergeCmd["$queryOptions"] = Value(cmdObj["$queryOptions"]);
}
- if (cmdObj.hasField(LiteParsedQuery::cmdOptionMaxTimeMS)) {
- mergeCmd[LiteParsedQuery::cmdOptionMaxTimeMS] =
- Value(cmdObj[LiteParsedQuery::cmdOptionMaxTimeMS]);
+ if (cmdObj.hasField(QueryRequest::cmdOptionMaxTimeMS)) {
+ mergeCmd[QueryRequest::cmdOptionMaxTimeMS] =
+ Value(cmdObj[QueryRequest::cmdOptionMaxTimeMS]);
}
mergeCmd.setField("writeConcern", Value(cmdObj["writeConcern"]));
diff --git a/src/mongo/s/commands/commands_public.cpp b/src/mongo/s/commands/commands_public.cpp
index a2215e527a4..4e7993a97ee 100644
--- a/src/mongo/s/commands/commands_public.cpp
+++ b/src/mongo/s/commands/commands_public.cpp
@@ -42,7 +42,6 @@
#include "mongo/db/commands/copydb.h"
#include "mongo/db/commands/rename_collection.h"
#include "mongo/db/lasterror.h"
-#include "mongo/db/query/lite_parsed_query.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/catalog/catalog_cache.h"
diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp
index 214b0568849..fcba8883592 100644
--- a/src/mongo/s/commands/strategy.cpp
+++ b/src/mongo/s/commands/strategy.cpp
@@ -49,7 +49,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/find_common.h"
#include "mongo/db/query/getmore_request.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/db/server_parameters.h"
#include "mongo/db/stats/counters.h"
#include "mongo/rpc/metadata/server_selection_metadata.h"
@@ -144,8 +144,8 @@ void Strategy::queryOp(OperationContext* txn, Request& request) {
ReadPreferenceSetting readPreference(readPreferenceOption, TagSet());
BSONElement rpElem;
- auto readPrefExtractStatus = bsonExtractTypedField(
- q.query, LiteParsedQuery::kWrappedReadPrefField, mongo::Object, &rpElem);
+ auto readPrefExtractStatus =
+ bsonExtractTypedField(q.query, QueryRequest::kWrappedReadPrefField, mongo::Object, &rpElem);
if (readPrefExtractStatus.isOK()) {
auto parsedRps = ReadPreferenceSetting::fromBSON(rpElem.Obj());
@@ -160,9 +160,9 @@ void Strategy::queryOp(OperationContext* txn, Request& request) {
// If the $explain flag was set, we must run the operation on the shards as an explain command
// rather than a find command.
- if (canonicalQuery.getValue()->getParsed().isExplain()) {
- const LiteParsedQuery& lpq = canonicalQuery.getValue()->getParsed();
- BSONObj findCommand = lpq.asFindCommand();
+ if (canonicalQuery.getValue()->getQueryRequest().isExplain()) {
+ const QueryRequest& qr = canonicalQuery.getValue()->getQueryRequest();
+ BSONObj findCommand = qr.asFindCommand();
// We default to allPlansExecution verbosity.
auto verbosity = ExplainCommon::EXEC_ALL_PLANS;
@@ -172,7 +172,7 @@ void Strategy::queryOp(OperationContext* txn, Request& request) {
BSONObjBuilder explainBuilder;
uassertStatusOK(
- Strategy::explainFind(txn, findCommand, lpq, verbosity, metadata, &explainBuilder));
+ Strategy::explainFind(txn, findCommand, qr, verbosity, metadata, &explainBuilder));
BSONObj explainObj = explainBuilder.done();
replyToQuery(0, // query result flags
@@ -507,7 +507,7 @@ void Strategy::writeOp(OperationContext* txn, int op, Request& request) {
Status Strategy::explainFind(OperationContext* txn,
const BSONObj& findCommand,
- const LiteParsedQuery& lpq,
+ const QueryRequest& qr,
ExplainCommon::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out) {
@@ -521,11 +521,11 @@ Status Strategy::explainFind(OperationContext* txn,
std::vector<Strategy::CommandResult> shardResults;
Strategy::commandOp(txn,
- lpq.nss().db().toString(),
+ qr.nss().db().toString(),
explainCmdBob.obj(),
options,
- lpq.nss().toString(),
- lpq.getFilter(),
+ qr.nss().toString(),
+ qr.getFilter(),
&shardResults);
long long millisElapsed = timer.millis();
diff --git a/src/mongo/s/commands/strategy.h b/src/mongo/s/commands/strategy.h
index ebd9e36c520..f45031e2e0f 100644
--- a/src/mongo/s/commands/strategy.h
+++ b/src/mongo/s/commands/strategy.h
@@ -36,9 +36,9 @@
namespace mongo {
-class LiteParsedQuery;
class OperationContext;
class QueryMessage;
+class QueryRequest;
class Request;
namespace rpc {
@@ -68,7 +68,7 @@ public:
*/
static Status explainFind(OperationContext* txn,
const BSONObj& findCommand,
- const LiteParsedQuery& lpq,
+ const QueryRequest& qr,
ExplainCommon::Verbosity verbosity,
const rpc::ServerSelectionMetadata& serverSelectionMetadata,
BSONObjBuilder* out);
diff --git a/src/mongo/s/query/SConscript b/src/mongo/s/query/SConscript
index ae3d0c3e786..d2f2a03f3ed 100644
--- a/src/mongo/s/query/SConscript
+++ b/src/mongo/s/query/SConscript
@@ -87,7 +87,7 @@ env.CppUnitTest(
LIBDEPS=[
'async_results_merger',
'$BUILD_DIR/mongo/db/auth/authorization_manager_mock_init',
- '$BUILD_DIR/mongo/db/query/lite_parsed_query',
+ '$BUILD_DIR/mongo/db/query/query_request',
'$BUILD_DIR/mongo/db/service_context_noop_init',
'$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture',
'$BUILD_DIR/mongo/s/mongoscore',
diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp
index 7159d6c0271..fc5a8989a61 100644
--- a/src/mongo/s/query/async_results_merger_test.cpp
+++ b/src/mongo/s/query/async_results_merger_test.cpp
@@ -35,7 +35,7 @@
#include "mongo/db/json.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/query/getmore_request.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/task_executor.h"
#include "mongo/executor/thread_pool_task_executor_test_fixture.h"
@@ -105,17 +105,17 @@ protected:
boost::optional<long long> getMoreBatchSize = boost::none,
ReadPreferenceSetting readPref = ReadPreferenceSetting(ReadPreference::PrimaryOnly)) {
const bool isExplain = true;
- const auto lpq =
- unittest::assertGet(LiteParsedQuery::makeFromFindCommand(_nss, findCmd, isExplain));
+ const auto qr =
+ unittest::assertGet(QueryRequest::makeFromFindCommand(_nss, findCmd, isExplain));
ClusterClientCursorParams params = ClusterClientCursorParams(_nss, readPref);
- params.sort = lpq->getSort();
- params.limit = lpq->getLimit();
- params.batchSize = getMoreBatchSize ? getMoreBatchSize : lpq->getBatchSize();
- params.skip = lpq->getSkip();
- params.isTailable = lpq->isTailable();
- params.isAwaitData = lpq->isAwaitData();
- params.isAllowPartialResults = lpq->isAllowPartialResults();
+ params.sort = qr->getSort();
+ params.limit = qr->getLimit();
+ params.batchSize = getMoreBatchSize ? getMoreBatchSize : qr->getBatchSize();
+ params.skip = qr->getSkip();
+ params.isTailable = qr->isTailable();
+ params.isAwaitData = qr->isAwaitData();
+ params.isAllowPartialResults = qr->isAllowPartialResults();
for (const auto& shardId : shardIds) {
params.remotes.emplace_back(shardId, findCmd);
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index 18f44d9e510..7163f76281e 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -74,75 +74,75 @@ static const BSONObj kSortKeyMetaProjection = BSON("$meta"
static const int kPerDocumentOverheadBytesUpperBound = 10;
/**
- * Given the LiteParsedQuery 'lpq' being executed by mongos, returns a copy of the query which is
+ * Given the QueryRequest 'qr' being executed by mongos, returns a copy of the query which is
* suitable for forwarding to the targeted hosts.
*/
-StatusWith<std::unique_ptr<LiteParsedQuery>> transformQueryForShards(const LiteParsedQuery& lpq) {
+StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(const QueryRequest& qr) {
// If there is a limit, we forward the sum of the limit and the skip.
boost::optional<long long> newLimit;
- if (lpq.getLimit()) {
+ if (qr.getLimit()) {
long long newLimitValue;
- if (mongoSignedAddOverflow64(*lpq.getLimit(), lpq.getSkip().value_or(0), &newLimitValue)) {
+ if (mongoSignedAddOverflow64(*qr.getLimit(), qr.getSkip().value_or(0), &newLimitValue)) {
return Status(
ErrorCodes::Overflow,
str::stream()
<< "sum of limit and skip cannot be represented as a 64-bit integer, limit: "
- << *lpq.getLimit()
+ << *qr.getLimit()
<< ", skip: "
- << lpq.getSkip().value_or(0));
+ << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
}
// Similarly, if nToReturn is set, we forward the sum of nToReturn and the skip.
boost::optional<long long> newNToReturn;
- if (lpq.getNToReturn()) {
+ if (qr.getNToReturn()) {
// !wantMore and ntoreturn mean the same as !wantMore and limit, so perform the conversion.
- if (!lpq.wantMore()) {
+ if (!qr.wantMore()) {
long long newLimitValue;
if (mongoSignedAddOverflow64(
- *lpq.getNToReturn(), lpq.getSkip().value_or(0), &newLimitValue)) {
+ *qr.getNToReturn(), qr.getSkip().value_or(0), &newLimitValue)) {
return Status(ErrorCodes::Overflow,
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *lpq.getNToReturn()
+ << *qr.getNToReturn()
<< ", skip: "
- << lpq.getSkip().value_or(0));
+ << qr.getSkip().value_or(0));
}
newLimit = newLimitValue;
} else {
long long newNToReturnValue;
if (mongoSignedAddOverflow64(
- *lpq.getNToReturn(), lpq.getSkip().value_or(0), &newNToReturnValue)) {
+ *qr.getNToReturn(), qr.getSkip().value_or(0), &newNToReturnValue)) {
return Status(ErrorCodes::Overflow,
str::stream()
<< "sum of ntoreturn and skip cannot be represented as a 64-bit "
"integer, ntoreturn: "
- << *lpq.getNToReturn()
+ << *qr.getNToReturn()
<< ", skip: "
- << lpq.getSkip().value_or(0));
+ << qr.getSkip().value_or(0));
}
newNToReturn = newNToReturnValue;
}
}
// If there is a sort other than $natural, we send a sortKey meta-projection to the remote node.
- BSONObj newProjection = lpq.getProj();
- if (!lpq.getSort().isEmpty() && !lpq.getSort()["$natural"]) {
+ BSONObj newProjection = qr.getProj();
+ if (!qr.getSort().isEmpty() && !qr.getSort()["$natural"]) {
BSONObjBuilder projectionBuilder;
- projectionBuilder.appendElements(lpq.getProj());
+ projectionBuilder.appendElements(qr.getProj());
projectionBuilder.append(ClusterClientCursorParams::kSortKeyField, kSortKeyMetaProjection);
newProjection = projectionBuilder.obj();
}
- auto newLPQ = stdx::make_unique<LiteParsedQuery>(lpq);
- newLPQ->setProj(newProjection);
- newLPQ->setSkip(boost::none);
- newLPQ->setLimit(newLimit);
- newLPQ->setNToReturn(newNToReturn);
- invariantOK(newLPQ->validate());
- return std::move(newLPQ);
+ auto newQR = stdx::make_unique<QueryRequest>(qr);
+ newQR->setProj(newProjection);
+ newQR->setSkip(boost::none);
+ newQR->setLimit(newLimit);
+ newQR->setNToReturn(newNToReturn);
+ invariantOK(newQR->validate());
+ return std::move(newQR);
}
StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
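A worked example for the arithmetic in transformQueryForShards above: a query with limit 10 and skip 3 is forwarded to each shard with limit 13 and no skip, and mongos applies the skip of 3 itself while merging; the mongoSignedAddOverflow64 call only guards the addition. A sketch of just that step (forwardedLimit is a hypothetical helper, not part of this commit):
    StatusWith<boost::optional<long long>> forwardedLimit(boost::optional<long long> limit,
                                                          boost::optional<long long> skip) {
        if (!limit) {
            return boost::optional<long long>{};  // nothing to forward
        }
        long long sum;
        if (mongoSignedAddOverflow64(*limit, skip.value_or(0), &sum)) {
            return Status(ErrorCodes::Overflow, "sum of limit and skip overflows a 64-bit integer");
        }
        return boost::optional<long long>{sum};  // e.g. limit 10, skip 3 -> forward limit 13
    }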
@@ -161,7 +161,7 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
invariant(chunkManager);
std::set<ShardId> shardIds;
- chunkManager->getShardIdsForQuery(txn, query.getParsed().getFilter(), &shardIds);
+ chunkManager->getShardIdsForQuery(txn, query.getQueryRequest().getFilter(), &shardIds);
for (auto id : shardIds) {
auto shard = shardRegistry->getShard(txn, id);
@@ -174,12 +174,12 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
}
ClusterClientCursorParams params(query.nss(), readPref);
- params.limit = query.getParsed().getLimit();
- params.batchSize = query.getParsed().getEffectiveBatchSize();
- params.skip = query.getParsed().getSkip();
- params.isTailable = query.getParsed().isTailable();
- params.isAwaitData = query.getParsed().isAwaitData();
- params.isAllowPartialResults = query.getParsed().isAllowPartialResults();
+ params.limit = query.getQueryRequest().getLimit();
+ params.batchSize = query.getQueryRequest().getEffectiveBatchSize();
+ params.skip = query.getQueryRequest().getSkip();
+ params.isTailable = query.getQueryRequest().isTailable();
+ params.isAwaitData = query.getQueryRequest().isAwaitData();
+ params.isAllowPartialResults = query.getQueryRequest().isAllowPartialResults();
// This is the batchSize passed to each subsequent getMore command issued by the cursor. We
// usually use the batchSize associated with the initial find, but as it is illegal to send a
@@ -191,16 +191,16 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
// $natural sort is actually a hint to use a collection scan, and shouldn't be treated like a
// sort on mongos. Including a $natural anywhere in the sort spec results in the whole sort
// being considered a hint to use a collection scan.
- if (!query.getParsed().getSort().hasField("$natural")) {
- params.sort = FindCommon::transformSortSpec(query.getParsed().getSort());
+ if (!query.getQueryRequest().getSort().hasField("$natural")) {
+ params.sort = FindCommon::transformSortSpec(query.getQueryRequest().getSort());
}
// Tailable cursors can't have a sort, which should have already been validated.
invariant(params.sort.isEmpty() || !params.isTailable);
- const auto lpqToForward = transformQueryForShards(query.getParsed());
- if (!lpqToForward.isOK()) {
- return lpqToForward.getStatus();
+ const auto qrToForward = transformQueryForShards(query.getQueryRequest());
+ if (!qrToForward.isOK()) {
+ return qrToForward.getStatus();
}
// Use read pref to target a particular host from each shard. Also construct the find command
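
As the comment in the hunk above notes, a $natural key anywhere in the sort spec turns the whole sort into a collection-scan hint, so no merge sort is installed on mongos; otherwise the sort is rewritten with FindCommon::transformSortSpec so the merger can order documents by the sortKey meta-projection added in transformQueryForShards. A small sketch of that gate, assuming the MongoDB tree for BSONObj and FindCommon (both visible in this diff):

    #include "mongo/bson/bsonobj.h"
    #include "mongo/db/query/find_common.h"

    namespace mongo {
    // Returns the sort to install on the merging cursor: empty when the spec is
    // really a collection-scan hint (any $natural key), otherwise the
    // sortKey-based spec produced by FindCommon::transformSortSpec.
    BSONObj mergeSortFor(const BSONObj& sortSpec) {
        if (sortSpec.hasField("$natural")) {
            return BSONObj();
        }
        return FindCommon::transformSortSpec(sortSpec);
    }
    }  // namespace mongo
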
@@ -210,7 +210,7 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
// Build the find command, and attach shard version if necessary.
BSONObjBuilder cmdBuilder;
- lpqToForward.getValue()->asFindCommand(&cmdBuilder);
+ qrToForward.getValue()->asFindCommand(&cmdBuilder);
if (chunkManager) {
ChunkVersion version(chunkManager->getVersion(shard->getId()));
@@ -228,7 +228,7 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
auto cursorState = ClusterCursorManager::CursorState::NotExhausted;
int bytesBuffered = 0;
- while (!FindCommon::enoughForFirstBatch(query.getParsed(), results->size())) {
+ while (!FindCommon::enoughForFirstBatch(query.getQueryRequest(), results->size())) {
auto next = ccc->next();
if (!next.isOK()) {
return next.getStatus();
@@ -258,7 +258,7 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
results->push_back(std::move(*next.getValue()));
}
- if (!query.getParsed().wantMore() && !ccc->isTailable()) {
+ if (!query.getQueryRequest().wantMore() && !ccc->isTailable()) {
cursorState = ClusterCursorManager::CursorState::Exhausted;
}
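
The two hunks above buffer documents from the merging cursor until FindCommon::enoughForFirstBatch reports that the first batch is full, then mark the cursor Exhausted when the request is single-batch (wantMore() false) and not tailable. A standalone sketch of that batching shape, with plain callbacks standing in for the ClusterClientCursor and for enoughForFirstBatch (neither type is defined in this diff):

    #include <cstddef>
    #include <functional>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    // Stand-ins: the real code buffers BSONObj documents pulled from a
    // ClusterClientCursor and asks FindCommon::enoughForFirstBatch(query, n)
    // whether the first batch is complete.
    using Doc = std::string;
    using NextFn = std::function<std::optional<Doc>()>;
    using EnoughFn = std::function<bool(std::size_t)>;

    // Fills 'out' until 'enough' is satisfied or the source runs dry; returns
    // false when the source is exhausted (the cursor can then be marked
    // Exhausted regardless of wantMore).
    bool fillFirstBatch(const NextFn& next, const EnoughFn& enough, std::vector<Doc>* out) {
        while (!enough(out->size())) {
            std::optional<Doc> doc = next();
            if (!doc) {
                return false;
            }
            out->push_back(std::move(*doc));
        }
        return true;
    }
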
@@ -272,7 +272,7 @@ StatusWith<CursorId> runQueryWithoutRetrying(OperationContext* txn,
auto cursorManager = grid.getCursorManager();
const auto cursorType = chunkManager ? ClusterCursorManager::CursorType::NamespaceSharded
: ClusterCursorManager::CursorType::NamespaceNotSharded;
- const auto cursorLifetime = query.getParsed().isNoCursorTimeout()
+ const auto cursorLifetime = query.getQueryRequest().isNoCursorTimeout()
? ClusterCursorManager::CursorLifetime::Immortal
: ClusterCursorManager::CursorLifetime::Mortal;
return cursorManager->registerCursor(
@@ -290,12 +290,12 @@ StatusWith<CursorId> ClusterFind::runQuery(OperationContext* txn,
invariant(results);
// Projection on the reserved sort key field is illegal in mongos.
- if (query.getParsed().getProj().hasField(ClusterClientCursorParams::kSortKeyField)) {
+ if (query.getQueryRequest().getProj().hasField(ClusterClientCursorParams::kSortKeyField)) {
return {ErrorCodes::BadValue,
str::stream() << "Projection contains illegal field '"
<< ClusterClientCursorParams::kSortKeyField
<< "': "
- << query.getParsed().getProj()};
+ << query.getQueryRequest().getProj()};
}
auto dbConfig = grid.catalogCache()->getDatabase(txn, query.nss().db().toString());
@@ -417,14 +417,13 @@ StatusWith<ReadPreferenceSetting> ClusterFind::extractUnwrappedReadPref(const BS
const bool isSlaveOk) {
BSONElement queryOptionsElt;
auto status = bsonExtractTypedField(
- cmdObj, LiteParsedQuery::kUnwrappedReadPrefField, BSONType::Object, &queryOptionsElt);
+ cmdObj, QueryRequest::kUnwrappedReadPrefField, BSONType::Object, &queryOptionsElt);
if (status.isOK()) {
// There must be a nested object containing the read preference if there is a queryOptions
// field.
BSONObj queryOptionsObj = queryOptionsElt.Obj();
- invariant(queryOptionsObj[LiteParsedQuery::kWrappedReadPrefField].type() ==
- BSONType::Object);
- BSONObj readPrefObj = queryOptionsObj[LiteParsedQuery::kWrappedReadPrefField].Obj();
+ invariant(queryOptionsObj[QueryRequest::kWrappedReadPrefField].type() == BSONType::Object);
+ BSONObj readPrefObj = queryOptionsObj[QueryRequest::kWrappedReadPrefField].Obj();
auto readPref = ReadPreferenceSetting::fromBSON(readPrefObj);
if (!readPref.isOK()) {
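
extractUnwrappedReadPref above expects the read preference as a nested object: the wrapped field inside the unwrapped queryOptions field, both exposed as constants on QueryRequest (their literal spellings are not shown here). A hypothetical sketch of the client-side counterpart, a wrapReadPref helper that nests a read preference this way, assuming the MongoDB tree for the BSON builders:

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/db/query/query_request.h"

    namespace mongo {
    // Produces a command object whose read preference is nested the way the
    // extraction code above expects: an object under the "unwrapped" field
    // that itself holds the "wrapped" read preference object.
    BSONObj wrapReadPref(const BSONObj& cmdObj, const BSONObj& readPrefObj) {
        BSONObjBuilder bob;
        bob.appendElements(cmdObj);
        {
            BSONObjBuilder queryOptionsBob(bob.subobjStart(QueryRequest::kUnwrappedReadPrefField));
            queryOptionsBob.append(QueryRequest::kWrappedReadPrefField, readPrefObj);
        }
        return bob.obj();
    }
    }  // namespace mongo
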
diff --git a/src/mongo/s/set_shard_version_request.cpp b/src/mongo/s/set_shard_version_request.cpp
index f6afef3b4e4..b21c4423a84 100644
--- a/src/mongo/s/set_shard_version_request.cpp
+++ b/src/mongo/s/set_shard_version_request.cpp
@@ -34,7 +34,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/util/bson_extract.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/mongoutils/str.h"
@@ -218,7 +218,7 @@ BSONObj SetShardVersionRequest::toBSON() const {
if (_init) {
// Always include a 30 second timeout on sharding state initialization, to work around
// SERVER-21458.
- cmdBuilder.append(LiteParsedQuery::cmdOptionMaxTimeMS, 30000);
+ cmdBuilder.append(QueryRequest::cmdOptionMaxTimeMS, 30000);
} else {
_version.get().appendForSetShardVersion(&cmdBuilder);
}
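
The hunk above appends a fixed 30-second maxTimeMS, via the constant that moved from LiteParsedQuery to QueryRequest, to work around SERVER-21458. A minimal sketch of the same pattern on an arbitrary command builder (the ping command is only an illustration):

    #include "mongo/bson/bsonobj.h"
    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/db/query/query_request.h"

    namespace mongo {
    // Attaches a 30 second server-side time limit to a command object.
    BSONObj buildPingWithTimeout() {
        BSONObjBuilder cmdBuilder;
        cmdBuilder.append("ping", 1);
        cmdBuilder.append(QueryRequest::cmdOptionMaxTimeMS, 30000);
        return cmdBuilder.obj();
    }
    }  // namespace mongo
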
diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp
index 607f2fc2a42..3ec6bd8809e 100644
--- a/src/mongo/s/shard_key_pattern.cpp
+++ b/src/mongo/s/shard_key_pattern.cpp
@@ -270,10 +270,10 @@ StatusWith<BSONObj> ShardKeyPattern::extractShardKeyFromQuery(OperationContext*
if (!isValid())
return StatusWith<BSONObj>(BSONObj());
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString(""));
- lpq->setFilter(basicQuery);
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString(""));
+ qr->setFilter(basicQuery);
- auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(lpq), ExtensionsCallbackNoop());
+ auto statusWithCQ = CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
if (!statusWithCQ.isOK()) {
return StatusWith<BSONObj>(statusWithCQ.getStatus());
}
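
The pattern in the hunk above recurs throughout this change: construct a QueryRequest, set the filter, then hand it to CanonicalQuery::canonicalize with ExtensionsCallbackNoop. A condensed sketch of that sequence as a helper, assuming the MongoDB tree for these types and header paths, and assuming canonicalize returns StatusWith<std::unique_ptr<CanonicalQuery>> as its callers here suggest:

    #include <memory>

    #include "mongo/base/status_with.h"
    #include "mongo/db/matcher/extensions_callback_noop.h"
    #include "mongo/db/namespace_string.h"
    #include "mongo/db/query/canonical_query.h"
    #include "mongo/db/query/query_request.h"
    #include "mongo/stdx/memory.h"

    namespace mongo {
    // Canonicalizes a bare filter against 'nss'; 'txn' is the operation
    // context threaded in from the caller, as in the hunk above.
    StatusWith<std::unique_ptr<CanonicalQuery>> canonicalizeFilter(OperationContext* txn,
                                                                   const NamespaceString& nss,
                                                                   const BSONObj& filter) {
        auto qr = stdx::make_unique<QueryRequest>(nss);
        qr->setFilter(filter);
        return CanonicalQuery::canonicalize(txn, std::move(qr), ExtensionsCallbackNoop());
    }
    }  // namespace mongo
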
diff --git a/src/mongo/s/sharding_test_fixture.cpp b/src/mongo/s/sharding_test_fixture.cpp
index 3c6660fcc61..60347c00b03 100644
--- a/src/mongo/s/sharding_test_fixture.cpp
+++ b/src/mongo/s/sharding_test_fixture.cpp
@@ -39,7 +39,7 @@
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/service_context_noop.h"
#include "mongo/executor/network_interface_mock.h"
@@ -283,7 +283,7 @@ void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards)
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss.toString(), ShardType::ConfigNS);
- auto queryResult = LiteParsedQuery::makeFromFindCommand(nss, request.cmdObj, false);
+ auto queryResult = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
ASSERT_OK(queryResult.getStatus());
const auto& query = queryResult.getValue();
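
The fixture above re-parses the outgoing find command with QueryRequest::makeFromFindCommand (isExplain = false) so it can assert on the parsed request. A small round-trip sketch using only QueryRequest calls visible in this diff; the namespace and field values are purely illustrative:

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/db/namespace_string.h"
    #include "mongo/db/query/query_request.h"
    #include "mongo/util/assert_util.h"

    namespace mongo {
    // Serializes a request to its find-command form and parses it back with
    // makeFromFindCommand, mirroring what the fixture does for the outgoing
    // command.
    void roundTripFindCommand() {
        QueryRequest qr(NamespaceString("test.coll"));
        qr.setFilter(BSON("x" << 1));
        qr.setLimit(5);
        const BSONObj cmd = qr.asFindCommand();
        auto reparsed = QueryRequest::makeFromFindCommand(NamespaceString("test.coll"), cmd, false);
        invariantOK(reparsed.getStatus());
    }
    }  // namespace mongo
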
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 8413f12b731..cf67b30b405 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -41,7 +41,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/query/cursor_response.h"
#include "mongo/db/query/getmore_request.h"
-#include "mongo/db/query/lite_parsed_query.h"
+#include "mongo/db/query/query_request.h"
#include "mongo/scripting/bson_template_evaluator.h"
#include "mongo/scripting/engine.h"
#include "mongo/stdx/thread.h"
@@ -615,22 +615,22 @@ bool BenchRunWorker::shouldCollectStats() const {
void doNothing(const BSONObj&) {}
/**
- * Issues the query 'lpq' against 'conn' using read commands. Returns the size of the result set
+ * Issues the query 'qr' against 'conn' using read commands. Returns the size of the result set
* returned by the query.
*
- * If 'lpq' has the 'wantMore' flag set to false and the 'limit' option set to 1LL, then the caller
+ * If 'qr' has the 'wantMore' flag set to false and the 'limit' option set to 1LL, then the caller
* may optionally specify a pointer to an object in 'objOut', which will be filled in with the
* single object in the query result set (or the empty object, if the result set is empty).
- * If 'lpq' doesn't have these options set, then nullptr must be passed for 'objOut'.
+ * If 'qr' doesn't have these options set, then nullptr must be passed for 'objOut'.
*
* On error, throws a UserException.
*/
int runQueryWithReadCommands(DBClientBase* conn,
- unique_ptr<LiteParsedQuery> lpq,
+ unique_ptr<QueryRequest> qr,
BSONObj* objOut = nullptr) {
- std::string dbName = lpq->nss().db().toString();
+ std::string dbName = qr->nss().db().toString();
BSONObj findCommandResult;
- bool res = conn->runCommand(dbName, lpq->asFindCommand(), findCommandResult);
+ bool res = conn->runCommand(dbName, qr->asFindCommand(), findCommandResult);
uassert(ErrorCodes::CommandFailed,
str::stream() << "find command failed; reply was: " << findCommandResult,
res);
@@ -640,7 +640,7 @@ int runQueryWithReadCommands(DBClientBase* conn,
int count = cursorResponse.getBatch().size();
if (objOut) {
- invariant(lpq->getLimit() && *lpq->getLimit() == 1 && !lpq->wantMore());
+ invariant(qr->getLimit() && *qr->getLimit() == 1 && !qr->wantMore());
// Since this is a "single batch" query, we can simply grab the first item in the result set
// and return here.
*objOut = (count > 0) ? cursorResponse.getBatch()[0] : BSONObj();
@@ -648,9 +648,9 @@ int runQueryWithReadCommands(DBClientBase* conn,
}
while (cursorResponse.getCursorId() != 0) {
- GetMoreRequest getMoreRequest(lpq->nss(),
+ GetMoreRequest getMoreRequest(qr->nss(),
cursorResponse.getCursorId(),
- lpq->getBatchSize(),
+ qr->getBatchSize(),
boost::none, // maxTimeMS
boost::none, // term
boost::none); // lastKnownCommittedOpTime
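
The loop above keeps issuing getMores until the cursor id drops to zero, carrying over the batch size from the original QueryRequest and leaving maxTimeMS, term, and lastKnownCommittedOpTime unset. A condensed sketch of just that construction; how the request is then sent to the server lies outside this hunk, so it is omitted:

    #include <boost/optional.hpp>

    #include "mongo/db/namespace_string.h"
    #include "mongo/db/query/getmore_request.h"

    namespace mongo {
    // Builds the next getMore for a cursor, with the trailing options left
    // unset exactly as in the loop above.
    GetMoreRequest makeNextBatchRequest(const NamespaceString& nss,
                                        long long cursorId,
                                        boost::optional<long long> batchSize) {
        return GetMoreRequest(nss,
                              cursorId,
                              batchSize,
                              boost::none,   // maxTimeMS
                              boost::none,   // term
                              boost::none);  // lastKnownCommittedOpTime
    }
    }  // namespace mongo
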
@@ -714,15 +714,15 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
BSONObj fixedQuery = fixQuery(op.query, bsonTemplateEvaluator);
BSONObj result;
if (op.useReadCmd) {
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString(op.ns));
- lpq->setFilter(fixedQuery);
- lpq->setProj(op.projection);
- lpq->setLimit(1LL);
- lpq->setWantMore(false);
- invariantOK(lpq->validate());
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString(op.ns));
+ qr->setFilter(fixedQuery);
+ qr->setProj(op.projection);
+ qr->setLimit(1LL);
+ qr->setWantMore(false);
+ invariantOK(qr->validate());
BenchRunEventTrace _bret(&stats.findOneCounter);
- runQueryWithReadCommands(conn, std::move(lpq), &result);
+ runQueryWithReadCommands(conn, std::move(qr), &result);
} else {
BenchRunEventTrace _bret(&stats.findOneCounter);
result = conn->findOne(op.ns, fixedQuery);
@@ -816,22 +816,22 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) {
"cannot use 'options' in combination with read commands",
!op.options);
- auto lpq = stdx::make_unique<LiteParsedQuery>(NamespaceString(op.ns));
- lpq->setFilter(fixedQuery);
- lpq->setProj(op.projection);
+ auto qr = stdx::make_unique<QueryRequest>(NamespaceString(op.ns));
+ qr->setFilter(fixedQuery);
+ qr->setProj(op.projection);
if (op.skip) {
- lpq->setSkip(op.skip);
+ qr->setSkip(op.skip);
}
if (op.limit) {
- lpq->setLimit(op.limit);
+ qr->setLimit(op.limit);
}
if (op.batchSize) {
- lpq->setBatchSize(op.batchSize);
+ qr->setBatchSize(op.batchSize);
}
- invariantOK(lpq->validate());
+ invariantOK(qr->validate());
BenchRunEventTrace _bret(&stats.queryCounter);
- count = runQueryWithReadCommands(conn, std::move(lpq));
+ count = runQueryWithReadCommands(conn, std::move(qr));
} else {
// Use special query function for exhaust query option.
if (op.options & QueryOption_Exhaust) {