author     Arun Banala <arun.banala@mongodb.com>            2020-11-02 18:09:28 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com> 2021-01-07 20:04:53 +0000
commit     c8ddb496f147e5a977eb668247b81da313d06e1d (patch)
tree       34ba0cc443ec6294c5585d0951d5430cc69349e2 /src
parent     d0cfd1d0c0fcfd75525a8d8f3a050876bc9d1e40 (diff)
download   mongo-c8ddb496f147e5a977eb668247b81da313d06e1d.tar.gz
SERVER-51619 Convert find command input to IDL
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/client/dbclient_cursor.cpp | 13
-rw-r--r--  src/mongo/db/commands/find_cmd.cpp | 3
-rw-r--r--  src/mongo/db/matcher/expression_optimize_test.cpp | 15
-rw-r--r--  src/mongo/db/namespace_string.cpp | 17
-rw-r--r--  src/mongo/db/namespace_string.h | 15
-rw-r--r--  src/mongo/db/pipeline/pipeline_d.cpp | 9
-rw-r--r--  src/mongo/db/pipeline/sharded_agg_helpers.cpp | 2
-rw-r--r--  src/mongo/db/query/SConscript | 15
-rw-r--r--  src/mongo/db/query/canonical_query.h | 2
-rw-r--r--  src/mongo/db/query/canonical_query_test.cpp | 18
-rw-r--r--  src/mongo/db/query/find.cpp | 4
-rw-r--r--  src/mongo/db/query/find_command.idl | 197
-rw-r--r--  src/mongo/db/query/parsed_distinct.cpp | 4
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp | 8
-rw-r--r--  src/mongo/db/query/planner_analysis.cpp | 9
-rw-r--r--  src/mongo/db/query/query_planner_array_test.cpp | 10
-rw-r--r--  src/mongo/db/query/query_planner_hashed_index_test.cpp | 83
-rw-r--r--  src/mongo/db/query/query_planner_test_fixture.cpp | 17
-rw-r--r--  src/mongo/db/query/query_planner_tree_test.cpp | 5
-rw-r--r--  src/mongo/db/query/query_planner_wildcard_index_test.cpp | 3
-rw-r--r--  src/mongo/db/query/query_request.cpp | 760
-rw-r--r--  src/mongo/db/query/query_request.h | 295
-rw-r--r--  src/mongo/db/query/query_request_test.cpp | 640
-rw-r--r--  src/mongo/db/query/query_solution.h | 3
-rw-r--r--  src/mongo/db/s/config/initial_split_policy.cpp | 2
-rw-r--r--  src/mongo/db/s/config/sharding_catalog_manager.cpp | 2
-rw-r--r--  src/mongo/dbtests/query_stage_subplan.cpp | 4
-rw-r--r--  src/mongo/idl/basic_types.h | 2
-rw-r--r--  src/mongo/idl/idl_parser.cpp | 25
-rw-r--r--  src/mongo/idl/idl_parser.h | 8
-rw-r--r--  src/mongo/idl/idl_test.cpp | 44
-rw-r--r--  src/mongo/idl/unittest.idl | 8
-rw-r--r--  src/mongo/s/balancer_configuration_test.cpp | 6
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_client_test.cpp | 71
-rw-r--r--  src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp | 24
-rw-r--r--  src/mongo/s/catalog_cache_refresh_test.cpp | 16
-rw-r--r--  src/mongo/s/client/shard_remote.cpp | 2
-rw-r--r--  src/mongo/s/cluster_identity_loader_test.cpp | 6
-rw-r--r--  src/mongo/s/commands/cluster_find_cmd.cpp | 5
-rw-r--r--  src/mongo/s/query/cluster_find.cpp | 21
-rw-r--r--  src/mongo/s/query/results_merger_test_fixture.h | 5
-rw-r--r--  src/mongo/s/sessions_collection_sharded.cpp | 8
-rw-r--r--  src/mongo/s/sharding_router_test_fixture.cpp | 7
-rw-r--r--  src/mongo/shell/bench.cpp | 6
44 files changed, 1110 insertions, 1309 deletions
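
The API change that drives most of the churn below: QueryRequest::makeFromFindCommand() no longer takes a mandatory NamespaceString and no longer returns a StatusWith; parsing now goes through the IDL-generated FindCommand and throws on failure, with the namespace optional and otherwise derived from the command's '$db' field. A minimal before/after sketch of a call site (hypothetical variable names, mirroring the find_cmd.cpp hunk below):

    // Before (pre-IDL): status-returning parse; the namespace is a required argument.
    // auto qr = uassertStatusOK(
    //     QueryRequest::makeFromFindCommand(std::move(nss), std::move(cmdObj), isExplain));

    // After (this commit): IDL-backed parse that throws on error; the namespace argument is
    // optional and, when omitted, is derived from '$db' plus the 'find' element.
    auto qr = QueryRequest::makeFromFindCommand(std::move(cmdObj), isExplain, std::move(nss));
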
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index f38a70f50b5..999a382b6ff 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -133,15 +133,16 @@ Message DBClientCursor::_assembleInit() {
// Legacy queries don't handle readOnce.
qr.getValue()->setReadOnce(true);
}
- if (query.getBoolField("$_requestResumeToken")) {
+ if (query.getBoolField(FindCommand::kRequestResumeTokenFieldName)) {
// Legacy queries don't handle requestResumeToken.
qr.getValue()->setRequestResumeToken(true);
}
- if (query.hasField("$_resumeAfter")) {
+ if (query.hasField(FindCommand::kResumeAfterFieldName)) {
// Legacy queries don't handle resumeAfter.
- qr.getValue()->setResumeAfter(query.getObjectField("$_resumeAfter"));
+ qr.getValue()->setResumeAfter(
+ query.getObjectField(FindCommand::kResumeAfterFieldName));
}
- if (auto replTerm = query[QueryRequest::kTermField]) {
+ if (auto replTerm = query[FindCommand::kTermFieldName]) {
// Legacy queries don't handle term.
qr.getValue()->setReplicationTerm(replTerm.numberLong());
}
@@ -152,8 +153,8 @@ Message DBClientCursor::_assembleInit() {
} else if (_readConcernObj) {
qr.getValue()->setReadConcern(*_readConcernObj);
}
- BSONObj cmd = _nsOrUuid.uuid() ? qr.getValue()->asFindCommandWithUuid()
- : qr.getValue()->asFindCommand();
+ BSONObj cmd = qr.getValue()->asFindCommand();
+
if (auto readPref = query["$readPreference"]) {
// QueryRequest doesn't handle $readPreference.
cmd = BSONObjBuilder(std::move(cmd)).append(readPref).obj();
diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp
index f7eb40c9025..7e51b1538d6 100644
--- a/src/mongo/db/commands/find_cmd.cpp
+++ b/src/mongo/db/commands/find_cmd.cpp
@@ -70,8 +70,7 @@ std::unique_ptr<QueryRequest> parseCmdObjectToQueryRequest(OperationContext* opC
NamespaceString nss,
BSONObj cmdObj,
bool isExplain) {
- auto qr = uassertStatusOK(
- QueryRequest::makeFromFindCommand(std::move(nss), std::move(cmdObj), isExplain));
+ auto qr = QueryRequest::makeFromFindCommand(std::move(cmdObj), isExplain, std::move(nss));
if (!qr->getLegacyRuntimeConstants()) {
qr->setLegacyRuntimeConstants(Variables::generateRuntimeConstants(opCtx));
}
diff --git a/src/mongo/db/matcher/expression_optimize_test.cpp b/src/mongo/db/matcher/expression_optimize_test.cpp
index 699d547c091..f34ac14890e 100644
--- a/src/mongo/db/matcher/expression_optimize_test.cpp
+++ b/src/mongo/db/matcher/expression_optimize_test.cpp
@@ -283,8 +283,9 @@ TEST(ExpressionOptimizeTest, IsValidGeoNearNaturalHint) {
TEST(ExpressionOptimizeTest, IsValidNaturalSortIndexHint) {
const bool isExplain = false;
- auto qr = assertGet(QueryRequest::makeFromFindCommand(
- nss, fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {a: 1}}"), isExplain));
+ auto qr = QueryRequest::makeFromFindCommand(
+ fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {a: 1}, '$db': 'test'}"),
+ isExplain);
// Invalid: {$natural: 1} sort order and index hint.
ASSERT_NOT_OK(isValid("{}", *qr));
@@ -292,8 +293,9 @@ TEST(ExpressionOptimizeTest, IsValidNaturalSortIndexHint) {
TEST(ExpressionOptimizeTest, IsValidNaturalSortNaturalHint) {
const bool isExplain = false;
- auto qr = assertGet(QueryRequest::makeFromFindCommand(
- nss, fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {$natural: 1}}"), isExplain));
+ auto qr = QueryRequest::makeFromFindCommand(
+ fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {$natural: 1}, '$db': 'test'}"),
+ isExplain);
// Valid: {$natural: 1} sort order and {$natural: 1} hint.
ASSERT_OK(isValid("{}", *qr));
@@ -301,8 +303,9 @@ TEST(ExpressionOptimizeTest, IsValidNaturalSortNaturalHint) {
TEST(ExpressionOptimizeTest, IsValidNaturalSortNaturalHintDifferentDirections) {
const bool isExplain = false;
- auto qr = assertGet(QueryRequest::makeFromFindCommand(
- nss, fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {$natural: -1}}"), isExplain));
+ auto qr = QueryRequest::makeFromFindCommand(
+ fromjson("{find: 'testcoll', sort: {$natural: 1}, hint: {$natural: -1}, '$db': 'test'}"),
+ isExplain);
// Invalid: {$natural: 1} sort order and {$natural: -1} hint.
ASSERT_NOT_OK(isValid("{}", *qr));
diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp
index b0d876cde44..3b28b021c50 100644
--- a/src/mongo/db/namespace_string.cpp
+++ b/src/mongo/db/namespace_string.cpp
@@ -325,6 +325,23 @@ std::string NamespaceStringOrUUID::toString() const {
return _uuid->toString();
}
+void NamespaceStringOrUUID::serialize(BSONObjBuilder* builder, StringData fieldName) const {
+ invariant(_uuid || _nss);
+ if (_preferNssForSerialization) {
+ if (_nss) {
+ builder->append(fieldName, _nss->coll());
+ } else {
+ _uuid->appendToBuilder(builder, fieldName);
+ }
+ } else {
+ if (_uuid) {
+ _uuid->appendToBuilder(builder, fieldName);
+ } else {
+ builder->append(fieldName, _nss->coll());
+ }
+ }
+}
+
std::ostream& operator<<(std::ostream& stream, const NamespaceString& nss) {
return stream << nss.toString();
}
diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h
index c264421dc13..029d9c7e18c 100644
--- a/src/mongo/db/namespace_string.h
+++ b/src/mongo/db/namespace_string.h
@@ -498,6 +498,10 @@ public:
return _nss;
}
+ void setNss(const NamespaceString& nss) {
+ _nss = nss;
+ }
+
const boost::optional<UUID>& uuid() const {
return _uuid;
}
@@ -509,6 +513,10 @@ public:
return _dbname;
}
+ void preferNssForSerialization() {
+ _preferNssForSerialization = true;
+ }
+
/**
* Returns database name derived from either '_nss' or '_dbname'.
*/
@@ -518,11 +526,16 @@ public:
std::string toString() const;
+ void serialize(BSONObjBuilder* builder, StringData fieldName) const;
+
private:
- // At any given time exactly one of these optionals will be initialized
+ // At any given time exactly one of these optionals will be initialized.
boost::optional<NamespaceString> _nss;
boost::optional<UUID> _uuid;
+ // When serializing, if both '_nss' and '_uuid' are present, use '_nss'.
+ bool _preferNssForSerialization = false;
+
// Empty string when '_nss' is non-none, and contains the database name when '_uuid' is
// non-none. Although the UUID specifies a collection uniquely, we must later verify that the
// collection belongs to the database named here.
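
The two NamespaceStringOrUUID additions above (serialize() and preferNssForSerialization()) let the IDL layer write the find command's first element back out as either a collection name or a UUID. A small usage sketch with hypothetical values, following the .cpp implementation earlier in this diff:

    // Build a NamespaceStringOrUUID from a plain namespace (hypothetical collection).
    NamespaceStringOrUUID nssOrUuid{NamespaceString("test.coll")};
    nssOrUuid.preferNssForSerialization();  // if both name and UUID were set, prefer the name

    BSONObjBuilder bob;
    nssOrUuid.serialize(&bob, "find");      // appends {find: "coll"}, not a BinData UUID
    BSONObj cmd = bob.obj();
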
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index b2f9bb5491f..34014831f0b 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -204,8 +204,13 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
qr->setFilter(queryObj);
qr->setProj(projectionObj);
qr->setSort(sortObj);
- qr->setSkip(skipThenLimit.getSkip());
- qr->setLimit(skipThenLimit.getLimit());
+ if (auto skip = skipThenLimit.getSkip()) {
+ qr->setSkip(static_cast<std::int64_t>(*skip));
+ }
+ if (auto limit = skipThenLimit.getLimit()) {
+ qr->setLimit(static_cast<std::int64_t>(*limit));
+ }
+
if (aggRequest) {
qr->setExplain(static_cast<bool>(aggRequest->getExplain()));
qr->setHint(aggRequest->getHint().value_or(BSONObj()));
diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
index db56faecdbf..039288dc823 100644
--- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp
+++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp
@@ -825,7 +825,7 @@ BSONObj createCommandForTargetedShards(const boost::intrusive_ptr<ExpressionCont
}
targetedCmd[AggregateCommand::kBatchSizeFieldName] =
- Value(DOC(QueryRequest::kBatchSizeField << 0));
+ Value(DOC(aggregation_request_helper::kBatchSizeField << 0));
targetedCmd[AggregateCommand::kExchangeFieldName] =
exchangeSpec ? Value(exchangeSpec->exchangeSpec.toBSON()) : Value();
diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript
index 8a7eed3fde0..64cf8488fba 100644
--- a/src/mongo/db/query/SConscript
+++ b/src/mongo/db/query/SConscript
@@ -99,17 +99,6 @@ env.Library(
],
)
-env.Library(
- target='distinct_command_idl',
- source=[
- 'distinct_command.idl',
- ],
- LIBDEPS_PRIVATE=[
- '$BUILD_DIR/mongo/db/namespace_string',
- '$BUILD_DIR/mongo/idl/idl_parser',
- ],
-)
-
# Shared mongod/mongos query code.
env.Library(
target="query_common",
@@ -124,7 +113,6 @@ env.Library(
"collation/collator_factory_icu",
"collation/collator_icu",
"datetime/init_timezone_data",
- "distinct_command_idl",
"explain_options",
"query_planner",
"query_request",
@@ -187,6 +175,8 @@ env.Library(
env.Library(
target="query_request",
source=[
+ "distinct_command.idl",
+ "find_command.idl",
"query_request.cpp",
"tailable_mode.cpp",
"tailable_mode.idl",
@@ -200,6 +190,7 @@ env.Library(
"$BUILD_DIR/mongo/db/commands/test_commands_enabled",
"$BUILD_DIR/mongo/db/pipeline/runtime_constants_idl",
"$BUILD_DIR/mongo/db/repl/read_concern_args",
+ "hint_parser",
],
)
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index 1e4ac425c25..ddcd5c731d7 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -132,7 +132,7 @@ public:
MatchExpression* root() const {
return _root.get();
}
- BSONObj getQueryObj() const {
+ const BSONObj& getQueryObj() const {
return _qr->getFilter();
}
const QueryRequest& getQueryRequest() const {
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index 7d159ae47f2..41c5512e52f 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -99,8 +99,9 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
// Passing a sortKey meta-projection without a sort is an error.
{
const bool isExplain = false;
- auto qr = assertGet(QueryRequest::makeFromFindCommand(
- nss, fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}}"), isExplain));
+ auto qr = QueryRequest::makeFromFindCommand(
+ fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}, '$db': 'test'}"),
+ isExplain);
auto cq = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
ASSERT_NOT_OK(cq.getStatus());
}
@@ -108,10 +109,10 @@ TEST(CanonicalQueryTest, IsValidSortKeyMetaProjection) {
// Should be able to successfully create a CQ when there is a sort.
{
const bool isExplain = false;
- auto qr = assertGet(QueryRequest::makeFromFindCommand(
- nss,
- fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}, sort: {bar: 1}}"),
- isExplain));
+ auto qr = QueryRequest::makeFromFindCommand(
+ fromjson("{find: 'testcoll', projection: {foo: {$meta: 'sortKey'}}, sort: {bar: 1}, "
+ "'$db': 'test'}"),
+ isExplain);
auto cq = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
ASSERT_OK(cq.getStatus());
}
@@ -270,8 +271,9 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) {
const bool isExplain = true;
const std::string cmdStr =
- "{find:'bogusns', filter:{$or:[{a:1,b:1},{a:1,c:1}]}, projection:{a:1}, sort:{b:1}}";
- auto qr = assertGet(QueryRequest::makeFromFindCommand(nss, fromjson(cmdStr), isExplain));
+ "{find:'bogusns', filter:{$or:[{a:1,b:1},{a:1,c:1}]}, projection:{a:1}, sort:{b:1}, '$db': "
+ "'test'}";
+ auto qr = QueryRequest::makeFromFindCommand(fromjson(cmdStr), isExplain);
auto baseCq = assertGet(CanonicalQuery::canonicalize(opCtx.get(), std::move(qr)));
MatchExpression* firstClauseExpr = baseCq->root()->getChild(0);
diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp
index e399de60858..167812995ac 100644
--- a/src/mongo/db/query/find.cpp
+++ b/src/mongo/db/query/find.cpp
@@ -86,7 +86,7 @@ bool shouldSaveCursor(OperationContext* opCtx,
PlanExecutor::ExecState finalState,
PlanExecutor* exec) {
const QueryRequest& qr = exec->getCanonicalQuery()->getQueryRequest();
- if (!qr.wantMore()) {
+ if (qr.isSingleBatch()) {
return false;
}
@@ -718,7 +718,7 @@ bool runQuery(OperationContext* opCtx,
LOGV2_DEBUG(20915,
5,
"Enough for first batch",
- "wantMore"_attr = qr.wantMore(),
+ "wantMore"_attr = !qr.isSingleBatch(),
"numToReturn"_attr = qr.getNToReturn().value_or(0),
"numResults"_attr = numResults);
break;
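
One recurring mechanical change, first visible here in find.cpp: the hand-written wantMore() accessor is gone, and call sites switch to the IDL-generated isSingleBatch(), which has the opposite polarity. In sketch form:

    // The old accessor and the new one are logical negations of each other:
    //   qr.wantMore()  ==  !qr.isSingleBatch()
    // so a guard that used to read `if (!qr.wantMore()) return false;` becomes:
    if (qr.isSingleBatch()) {
        return false;  // close the cursor after the first batch
    }
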
diff --git a/src/mongo/db/query/find_command.idl b/src/mongo/db/query/find_command.idl
new file mode 100644
index 00000000000..8d47e5cda73
--- /dev/null
+++ b/src/mongo/db/query/find_command.idl
@@ -0,0 +1,197 @@
+# Copyright(C) 2020 - present MongoDB, Inc.
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the Server Side Public License, version 1,
+# as published by MongoDB, Inc.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# Server Side Public License for more details.
+#
+# You should have received a copy of the Server Side Public License
+# along with this program. If not, see
+# <http://www.mongodb.com/licensing/server-side-public-license>.
+#
+# As a special exception, the copyright holders give permission to link the
+# code of portions of this program with the OpenSSL library under certain
+# conditions as described in each individual source file and distribute
+# linked combinations including the program with the OpenSSL library. You
+# must comply with the Server Side Public License in all respects for
+# all of the code used other than as permitted herein. If you modify file(s)
+# with this exception, you may extend this exception to your version of the
+# file(s), but you are not obligated to do so. If you do not wish to do so,
+# delete this exception statement from your version. If you delete this
+# exception statement from all source files in the program, then also delete
+# it in the license file.
+#
+
+# This IDL file describes the BSON format for the find command. Note that the legacy OP_MSG request
+# parameters are not represented in the structure.
+
+global:
+ cpp_namespace: "mongo"
+ cpp_includes:
+ - "mongo/db/namespace_string.h"
+
+imports:
+ - "mongo/db/logical_session_id.idl"
+ - "mongo/db/pipeline/legacy_runtime_constants.idl"
+ - "mongo/idl/basic_types.idl"
+ - "mongo/db/query/hint.idl"
+
+types:
+ boolNoOpSerializer:
+ bson_serialization_type: any
+ description: "Bool data type field which doesn't produce any data when serialized."
+ cpp_type: "bool"
+ deserializer: "::mongo::parseBoolean"
+ serializer: "::mongo::noOpSerializer"
+ object_owned_nonempty_serialize:
+ bson_serialization_type: any
+ description: "An owned BSONObj, which gets serialized only when the BSON is not empty.
+ The object is ignored if empty, null or missing."
+ cpp_type: "mongo::BSONObj"
+ serializer: "::mongo::serializeBSONWhenNotEmpty"
+ deserializer: "::mongo::parseOwnedBSON"
+ default: mongo::BSONObj()
+ maxTimeMS:
+ bson_serialization_type: any
+ description: "An int representing max time ms."
+ cpp_type: "std::int32_t"
+ deserializer: "::mongo::QueryRequest::parseMaxTimeMSForIDL"
+
+commands:
+ find:
+ cpp_name: FindCommand
+ command_name: find
+ description: "A struct representing the find command"
+ strict: true
+ namespace: concatenate_with_db_or_uuid
+ non_const_getter: true
+ fields:
+ filter:
+ description: "The query predicate. If unspecified, then all documents in the collection
+ will match the predicate."
+ type: object_owned_nonempty_serialize
+ projection:
+ description: "The projection specification to determine which fields to include in the
+ returned documents."
+ type: object_owned_nonempty_serialize
+ sort:
+ description: "The sort specification for the ordering of the results."
+ type: object_owned_nonempty_serialize
+ hint:
+ description: "Specify either the index name as a string or the index key pattern. If
+ specified, then the query system will only consider plans using the hinted index."
+ type: indexHint
+ default: mongo::BSONObj()
+ collation:
+ description: "Specifies the collation to use for the operation."
+ type: object_owned_nonempty_serialize
+ skip:
+ description: "Number of documents to skip."
+ type: safeInt64
+ optional: true
+ limit:
+ description: "The maximum number of documents to return."
+ type: safeInt64
+ optional: true
+ batchSize:
+ description: "The number of documents to return in the first batch."
+ type: safeInt64
+ optional: true
+ ntoreturn:
+ description: "Deprecated. Should use batchSize."
+ type: safeInt64
+ optional: true
+ singleBatch:
+ description: "Determines whether to close the cursor after the first batch."
+ type: optionalBool
+ allowDiskUse:
+ description: "Use allowDiskUse to allow MongoDB to use temporary files on disk to store
+ data exceeding the 100 megabyte memory limit while processing a blocking sort operation."
+ type: optionalBool
+ min:
+ description: "The inclusive lower bound for a specific index."
+ type: object_owned_nonempty_serialize
+ max:
+ description: "The exclusive upper bound for a specific index."
+ type: object_owned_nonempty_serialize
+ returnKey:
+ description: "If true, returns only the index keys in the resulting documents."
+ type: optionalBool
+ showRecordId:
+ description: "Determines whether to return the record identifier for each document."
+ type: optionalBool
+ $queryOptions:
+ description: "Deprecated. A mechanism to specify readPreference."
+ cpp_name: unwrappedReadPref
+ type: object_owned_nonempty_serialize
+ tailable:
+ description: "Returns a tailable cursor for a capped collections."
+ type: optionalBool
+ oplogReplay:
+ description: "Deprecated. An internal command for replaying a replica set’s oplog."
+ type: boolNoOpSerializer
+ optional: true
+ noCursorTimeout:
+ description: "Prevents the server from timing out idle cursors after an inactivity period."
+ type: optionalBool
+ awaitData:
+ description: "Use in conjunction with the tailable option to block a getMore command on the
+ cursor temporarily if at the end of data rather than returning no data."
+ type: optionalBool
+ allowPartialResults:
+ description: "For queries against a sharded collection, allows the command (or subsequent
+ getMore commands) to return partial results, rather than an error, if one or more queried
+ shards are unavailable."
+ type: optionalBool
+ let:
+ description: "Allows user defined variables to be used inside $expr."
+ type: object_owned
+ optional: true
+ options:
+ description: "Deprecated."
+ type: object_owned
+ optional: true
+ term:
+ description: "Deprecated."
+ type: safeInt64
+ optional: true
+ readOnce:
+ description: "Deprecated."
+ type: optionalBool
+ allowSpeculativeMajorityRead:
+ description: "Deprecated."
+ type: optionalBool
+ $_requestResumeToken:
+ description: "Deprecated."
+ cpp_name: requestResumeToken
+ type: optionalBool
+ $_resumeAfter:
+ description: "Deprecated."
+ cpp_name: resumeAfter
+ type: object_owned_nonempty_serialize
+ default: mongo::BSONObj()
+ _use44SortKeys:
+ description: "An internal parameter used to determine the serialization format for sort
+ keys. TODO SERVER-47065: A 4.7+ node still has to accept the '_use44SortKeys' field, since
+ it could be included in a command sent from a 4.4 mongos. When 5.0 becomes last-lts, this
+ code to tolerate the '_use44SortKeys' field can be deleted."
+ type: bool
+ optional: true
+ maxTimeMS:
+ description: "The cumulative time limit in milliseconds for processing operations on the
+ cursor."
+ type: maxTimeMS
+ optional: true
+ readConcern:
+ description: "Specifies the read concern."
+ type: object_owned
+ optional: true
+ runtimeConstants:
+ description: "A collection of values that do not change once computed."
+ cpp_name: legacyRuntimeConstants
+ type: LegacyRuntimeConstants
+ optional: true
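
From the spec above, the IDL compiler generates a FindCommand class with a parse() entry point, typed getters/setters, and k<Field>FieldName string constants (the constants are what dbclient_cursor.cpp switches to at the top of this diff). A rough sketch of the generated API in use, following the pattern in query_request.cpp further down; exact return types are inferred from the field types above, not quoted from generated code:

    // A find command body must carry '$db' for the IDL parser to resolve the namespace.
    BSONObj cmdObj = fromjson(
        "{find: 'coll', filter: {x: 1}, singleBatch: true, '$db': 'test'}");
    FindCommand findCmd = FindCommand::parse(IDLParserErrorContext("FindCommand"), cmdObj);

    // Typed accessors replace hand-rolled BSONElement extraction...
    const BSONObj& filter = findCmd.getFilter();
    auto singleBatch = findCmd.getSingleBatch();  // optionalBool-backed getter

    // ...and generated constants replace string literals such as "$_resumeAfter".
    bool hasResumeToken = cmdObj.hasField(FindCommand::kResumeAfterFieldName);
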
diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp
index 5cfa30ebdf9..08647e6bf53 100644
--- a/src/mongo/db/query/parsed_distinct.cpp
+++ b/src/mongo/db/query/parsed_distinct.cpp
@@ -231,9 +231,9 @@ StatusWith<BSONObj> ParsedDistinct::asAggregationCommand() const {
aggregationBuilder.append(QueryRequest::cmdOptionMaxTimeMS, qr.getMaxTimeMS());
}
- if (!qr.getReadConcern().isEmpty()) {
+ if (qr.getReadConcern() && !qr.getReadConcern()->isEmpty()) {
aggregationBuilder.append(repl::ReadConcernArgs::kReadConcernFieldName,
- qr.getReadConcern());
+ *qr.getReadConcern());
}
if (!qr.getUnwrappedReadPref().isEmpty()) {
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index f3d68422f54..c10a1d2a2cb 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -1048,8 +1048,7 @@ protected:
solns.clear();
const bool isExplain = false;
- std::unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ std::unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
@@ -1758,8 +1757,9 @@ TEST_F(CachePlanSelectionTest, Or2DNonNearNotCached) {
TEST_F(CachePlanSelectionTest, MatchingCollation) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
addIndex(BSON("x" << 1), "x_1", &collator);
- runQueryAsCommand(fromjson(
- "{find: 'testns', filter: {x: 'foo'}, collation: {locale: 'mock_reverse_string'}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {x: 'foo'}, collation: {locale: 'mock_reverse_string'}, "
+ "'$db': 'test'}"));
assertPlanCacheRecoversSolution(BSON("x"
<< "bar"),
diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp
index a2098647177..6c19619ba1c 100644
--- a/src/mongo/db/query/planner_analysis.cpp
+++ b/src/mongo/db/query/planner_analysis.cpp
@@ -540,7 +540,7 @@ bool canUseSimpleSort(const QuerySolutionNode& solnRoot,
const CanonicalQuery& cq,
const QueryPlannerParams& plannerParams) {
const bool splitLimitedSortEligible = cq.getQueryRequest().getNToReturn() &&
- cq.getQueryRequest().wantMore() &&
+ !cq.getQueryRequest().isSingleBatch() &&
plannerParams.options & QueryPlannerParams::SPLIT_LIMITED_SORT;
// The simple sort stage discards any metadata other than sort key metadata. It can only be used
@@ -857,7 +857,7 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
// One way to handle the ambiguity of a limited OR stage is to use the SPLIT_LIMITED_SORT
// hack.
//
- // If wantMore is false (meaning that 'ntoreturn' was initially passed to the server as a
+ // If singleBatch is true (meaning that 'ntoreturn' was initially passed to the server as a
// negative value), then we treat numToReturn as a limit. Since there is no limit-batchSize
// ambiguity in this case, we do not use the SPLIT_LIMITED_SORT hack.
//
@@ -876,7 +876,7 @@ QuerySolutionNode* QueryPlannerAnalysis::analyzeSort(const CanonicalQuery& query
//
// Not allowed for geo or text, because we assume elsewhere that those stages appear just
// once.
- if (qr.wantMore() && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
+ if (!qr.isSingleBatch() && params.options & QueryPlannerParams::SPLIT_LIMITED_SORT &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::TEXT) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO) &&
!QueryPlannerCommon::hasNode(query.root(), MatchExpression::GEO_NEAR)) {
@@ -913,7 +913,6 @@ std::unique_ptr<QuerySolution> QueryPlannerAnalysis::analyzeDataAccess(
const QueryPlannerParams& params,
std::unique_ptr<QuerySolutionNode> solnRoot) {
auto soln = std::make_unique<QuerySolution>();
- soln->filterData = query.getQueryObj();
soln->indexFilterApplied = params.indexFiltersApplied;
solnRoot->computeProperties();
@@ -1018,7 +1017,7 @@ std::unique_ptr<QuerySolution> QueryPlannerAnalysis::analyzeDataAccess(
limit->limit = *qr.getLimit();
limit->children.push_back(solnRoot.release());
solnRoot.reset(limit);
- } else if (qr.getNToReturn() && !qr.wantMore()) {
+ } else if (qr.getNToReturn() && qr.isSingleBatch()) {
// We have a "legacy limit", i.e. a negative ntoreturn value from an OP_QUERY style
// find.
LimitNode* limit = new LimitNode();
diff --git a/src/mongo/db/query/query_planner_array_test.cpp b/src/mongo/db/query/query_planner_array_test.cpp
index e4a95c9a1ce..089c6e70848 100644
--- a/src/mongo/db/query/query_planner_array_test.cpp
+++ b/src/mongo/db/query/query_planner_array_test.cpp
@@ -2423,7 +2423,7 @@ TEST_F(QueryPlannerTest, MultikeyIndexScanWithMinKeyMaxKeyBoundsCanProvideSort)
params.options &= ~QueryPlannerParams::INCLUDE_COLLSCAN;
MultikeyPaths multikeyPaths{{0U}};
addIndex(BSON("a" << 1), multikeyPaths);
- runQueryAsCommand(fromjson("{sort: {a: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'testns', sort: {a: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -2435,7 +2435,7 @@ TEST_F(QueryPlannerTest, MultikeyIndexScanWithBoundsOnIndexWithoutSharedPrefixCa
params.options &= ~QueryPlannerParams::INCLUDE_COLLSCAN;
MultikeyPaths multikeyPaths{{0U}, {0U}};
addIndex(BSON("a" << 1 << "b" << 1), multikeyPaths);
- runQueryAsCommand(fromjson("{filter: {b : {$gte: 3}}, sort: {a: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {b : {$gte: 3}}, sort: {a: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -2448,7 +2448,7 @@ TEST_F(QueryPlannerTest,
params.options &= ~QueryPlannerParams::INCLUDE_COLLSCAN;
MultikeyPaths multikeyPaths{{0U}, {0U}};
addIndex(BSON("a" << 1 << "b" << 1), multikeyPaths);
- runQueryAsCommand(fromjson("{filter: {a : {$eq: 3}}, sort: {b: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {a : {$eq: 3}}, sort: {b: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -2461,7 +2461,7 @@ TEST_F(QueryPlannerTest,
params.options &= ~QueryPlannerParams::INCLUDE_COLLSCAN;
MultikeyPaths multikeyPaths{{0U}, {0U}};
addIndex(BSON("a" << 1 << "a.b" << 1), multikeyPaths);
- runQueryAsCommand(fromjson("{filter: {a : {$gte: 3}}, sort: {'a.b': 1}}"));
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {a : {$gte: 3}}, sort: {'a.b': 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -2475,7 +2475,7 @@ TEST_F(QueryPlannerTest,
params.options &= ~QueryPlannerParams::INCLUDE_COLLSCAN;
MultikeyPaths multikeyPaths{{0U, 1U}, {0U}};
addIndex(BSON("a.b" << 1 << "a" << 1), multikeyPaths);
- runQueryAsCommand(fromjson("{filter: {'a.b' : {$gte: 3}}, sort: {a: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'testns', filter: {'a.b' : {$gte: 3}}, sort: {a: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
diff --git a/src/mongo/db/query/query_planner_hashed_index_test.cpp b/src/mongo/db/query/query_planner_hashed_index_test.cpp
index 07582599cae..7ed9ab2d108 100644
--- a/src/mongo/db/query/query_planner_hashed_index_test.cpp
+++ b/src/mongo/db/query/query_planner_hashed_index_test.cpp
@@ -391,7 +391,8 @@ TEST_F(QueryPlannerHashedTest, CannotCoverQueryWhenHashedFieldIsPrefix) {
// projection doesn't include the hashed field. This is to avoid the possibility of hash
// collision. If two different fields produce the same hash value, there is no way to
// distinguish them without fetching the document.
- runQueryAsCommand(fromjson("{filter: {x : {$eq: 5}}, projection:{_id: 0, y: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x : {$eq: 5}}, projection:{_id: 0, y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, y: 1}, node: {fetch: {filter: {x : {$eq: 5}}, node: {ixscan: "
@@ -400,7 +401,8 @@ TEST_F(QueryPlannerHashedTest, CannotCoverQueryWhenHashedFieldIsPrefix) {
// Verify that queries cannot be covered with hashed field is a prefix, despite query and
// projection not using hashed fields.
- runQueryAsCommand(fromjson("{filter: {y : {$eq: 5}}, projection:{_id: 0, y: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {y : {$eq: 5}}, projection:{_id: 0, y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists("{proj: {spec: {_id: 0, y: 1}, node: {cscan: {dir: 1}} }}");
}
@@ -411,7 +413,8 @@ TEST_F(QueryPlannerHashedTest, CanCoverQueryWhenHashedFieldIsNotPrefix) {
<< "z" << -1));
// Verify that query gets covered when neither query nor project use hashed field.
- runQueryAsCommand(fromjson("{filter: {x: {$gt: 24, $lt: 27}}, projection:{_id: 0, z: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x: {$gt: 24, $lt: 27}}, projection:{_id: 0, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -420,7 +423,7 @@ TEST_F(QueryPlannerHashedTest, CanCoverQueryWhenHashedFieldIsNotPrefix) {
"[['MaxKey','MinKey',true,true]] }}}}}");
// Verify that query doesn't get covered when query is on a hashed field.
- runQueryAsCommand(fromjson("{filter: {x: 1, y: 1}, projection:{_id: 0, z: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1, y: 1}, projection:{_id: 0, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, z: 1}, node: {fetch: {filter: {y : {$eq: 1}}, node: {ixscan: "
@@ -428,7 +431,7 @@ TEST_F(QueryPlannerHashedTest, CanCoverQueryWhenHashedFieldIsNotPrefix) {
getHashedBound(1) + "] , z: [['MaxKey','MinKey',true,true]]} }} }} }}");
// Verify that the query doesn't get covered when projection is on a hashed field.
- runQueryAsCommand(fromjson("{filter: {x: 1}, projection:{_id: 0, y: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, projection:{_id: 0, y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, y: 1}, node: {fetch: {filter: null, node: {ixscan: {filter: null, "
@@ -448,7 +451,8 @@ TEST_F(QueryPlannerHashedTest, CompoundHashedShardKeyWhenIndexAndShardKeyBothPro
<< "hashed");
// Verify that query gets covered when neither query nor project use hashed field.
- runQueryAsCommand(fromjson("{filter: {x: {$gt: 24, $lt: 27}}, projection:{_id: 0, z: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x: {$gt: 24, $lt: 27}}, projection:{_id: 0, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
@@ -466,7 +470,7 @@ TEST_F(QueryPlannerHashedTest,
<< "hashed");
// Verify that the query doesn't get covered when projection is on a hashed field.
- runQueryAsCommand(fromjson("{filter: {x: 1}, projection:{_id: 0, y: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, projection:{_id: 0, y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, y: 1}, node: {fetch: {filter: null, node: {sharding_filter: {node: "
@@ -476,7 +480,7 @@ TEST_F(QueryPlannerHashedTest,
// Verify that query doesn't get covered when query is on a hashed field, even though the
// projection does not include the hashed field.
- runQueryAsCommand(fromjson("{filter: {x: 1, y: 1}, projection:{_id: 0, z: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1, y: 1}, projection:{_id: 0, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, z: 1}, node: {sharding_filter: {node: {fetch: {filter: {y : {$eq: "
@@ -496,7 +500,7 @@ TEST_F(QueryPlannerHashedTest,
// shard key field is hashed.
params.shardKey = BSON("x" << 1 << "z"
<< "hashed");
- runQueryAsCommand(fromjson("{filter: {x: 1}, projection:{_id: 0, z: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, projection:{_id: 0, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, z: 1}, node: {sharding_filter: {node: {ixscan: {filter: null, "
@@ -513,7 +517,7 @@ TEST_F(QueryPlannerHashedTest,
// Cannot cover the query when index provides hashed value for a field ('y'), but the
// corresponding shard key field is a range field.
params.shardKey = BSON("x" << 1 << "y" << 1);
- runQueryAsCommand(fromjson("{filter: {x: 1}, projection:{_id: 0, x: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, projection:{_id: 0, x: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, x: 1}, node: {sharding_filter: {node: {fetch: {filter: null, node: "
@@ -533,7 +537,7 @@ TEST_F(QueryPlannerHashedTest, CompoundHashedShardKeyWhenIndexDoesNotHaveAllShar
params.shardKey = BSON("x" << 1 << "y"
<< "hashed"
<< "newField" << 1);
- runQueryAsCommand(fromjson("{filter: {x: 1}, projection:{_id: 0, x: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, projection:{_id: 0, x: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, x: 1}, node: {sharding_filter: {node: {fetch: {filter: null, node: "
@@ -543,7 +547,7 @@ TEST_F(QueryPlannerHashedTest, CompoundHashedShardKeyWhenIndexDoesNotHaveAllShar
params.shardKey = BSON("x" << 1 << "newField"
<< "hashed");
- runQueryAsCommand(fromjson("{filter: {x: 1}, projection:{_id: 0, x: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, projection:{_id: 0, x: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, x: 1}, node: {sharding_filter: {node: {fetch: {filter: null, node: "
@@ -561,13 +565,13 @@ TEST_F(QueryPlannerHashedTest, SortWhenHashedFieldIsPrefix) {
<< "y" << -1 << "z" << 1));
// Verify that sort on a hashed field results in collection scan.
- runQueryAsCommand(fromjson("{filter: {}, sort: {x: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {}, sort: {x: 1}}"));
assertNumSolutions(1U);
assertSolutionExists("{sort: {pattern: {x: 1}, limit: 0, node: {cscan: {dir: 1}} }}");
// Verify that a list of exact match predicates on hashed field (prefix) and sort with an
// immediate range field can use 'SORT_MERGE'.
- runQueryAsCommand(fromjson("{filter: {x: {$in: [1, 2]}}, sort: {y: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: {$in: [1, 2]}}, sort: {y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {x: {$in: [1,2]}}, node: {mergeSort: {nodes: [{ixscan: {pattern: {x: "
@@ -580,7 +584,7 @@ TEST_F(QueryPlannerHashedTest, SortWhenHashedFieldIsPrefix) {
// Verify that an equality predicate on hashed field (prefix) and sort with an immediate
// range field can be sorted by the index.
- runQueryAsCommand(fromjson("{filter: {x: 1}, sort: {y: 1, z: -1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 1}, sort: {y: 1, z: -1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {x: {$eq: 1}}, node: {ixscan: {pattern: {x: "
@@ -590,7 +594,8 @@ TEST_F(QueryPlannerHashedTest, SortWhenHashedFieldIsPrefix) {
// {$exists: false} is treated as a point-interval in BSONNULL. Hence index can provide the
// sort.
- runQueryAsCommand(fromjson("{filter: {x: {$exists: false}}, sort: {y: 1, z: -1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x: {$exists: false}}, sort: {y: 1, z: -1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {x: {$exists: false}}, node: {ixscan: {pattern: {x: 'hashed', y: -1, z: "
@@ -600,7 +605,7 @@ TEST_F(QueryPlannerHashedTest, SortWhenHashedFieldIsPrefix) {
// Sort on any index field other than the one immediately following the hashed field will use a
// blocking sort.
- runQueryAsCommand(fromjson("{filter: {x: 3}, sort: {z: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 3}, sort: {z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{sort: {pattern: {z:1}, limit: 0, type: 'simple', node: {fetch: {filter: {x: {$eq: "
@@ -615,14 +620,14 @@ TEST_F(QueryPlannerHashedTest, SortWhenNonHashedFieldIsPrefix) {
<< "a" << 1));
// Verify that sort on a hashed field results in collection scan.
- runQueryAsCommand(fromjson("{filter: {}, sort: {x: 1, y: -1, z: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {}, sort: {x: 1, y: -1, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{sort: {pattern: {x: 1, y: -1, z: 1}, limit: 0, node: {cscan: {dir: 1}} }}");
// Verify that a list of exact match predicates on range field (prefix) and sort with an
// immediate range field can use 'SORT_MERGE'.
- runQueryAsCommand(fromjson("{filter: {x: {$in: [1, 2]}}, sort: {y: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: {$in: [1, 2]}}, sort: {y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: null, node: {mergeSort: {nodes: [{ixscan: {pattern: {x: "
@@ -636,7 +641,8 @@ TEST_F(QueryPlannerHashedTest, SortWhenNonHashedFieldIsPrefix) {
// Verify that an exact match predicate on range field (prefix) and sort with an immediate range
// field doesn't require any additional sort stages. The entire operation can be answered by the
// index.
- runQueryAsCommand(fromjson("{filter: {x: 1}, sort: {y: -1}, projection: {_id: 0, a: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x: 1}, sort: {y: -1}, projection: {_id: 0, a: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, a: 1}, node: {ixscan: {pattern: {x: "
@@ -647,7 +653,8 @@ TEST_F(QueryPlannerHashedTest, SortWhenNonHashedFieldIsPrefix) {
// Verify that query predicate and sort on non-hashed fields can be answered without fetching
// the document, but require a sort stage, if the 'sort' field is not immediately after 'query'
// field in the index.
- runQueryAsCommand(fromjson("{filter: {x: 3}, sort: {a: 1}, projection: {_id: 0, y: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x: 3}, sort: {a: 1}, projection: {_id: 0, y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, y: 1}, node: "
@@ -657,7 +664,8 @@ TEST_F(QueryPlannerHashedTest, SortWhenNonHashedFieldIsPrefix) {
"[['MinKey','MaxKey',true,true]], a: [['MinKey','MaxKey',true,true]]} }} }} }}");
// Verify that sort on a hashed field requires a fetch and a blocking sort stage.
- runQueryAsCommand(fromjson("{filter: {x: 3}, sort: {z: 1}, projection: {_id: 0, y: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {x: 3}, sort: {z: 1}, projection: {_id: 0, y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, y: 1}, node: "
@@ -675,8 +683,8 @@ TEST_F(QueryPlannerHashedTest, SortWithMissingOrIrrelevantQueryPredicate) {
// Verify that a sort on non-hashed fields doesn't require any additional sort stages. The
// entire operation can be answered by the index. Also verify that if the projection only
// includes non-hashed index fields, plan does not use a fetch stage.
- runQueryAsCommand(
- fromjson("{filter: {}, sort: {x: 1, y: -1}, projection: {_id: 0, x: 1, y: 1, a: 1}}"));
+ runQueryAsCommand(fromjson(
+ "{find: 'test', filter: {}, sort: {x: 1, y: -1}, projection: {_id: 0, x: 1, y: 1, a: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, x: 1, y: 1, a: 1}, node: {ixscan: {pattern: {x: 1, y: -1, z: "
@@ -686,7 +694,7 @@ TEST_F(QueryPlannerHashedTest, SortWithMissingOrIrrelevantQueryPredicate) {
// Verify that a sort on non-hashed fields with a query predicate on fields irrelevant to the
// index, doesn't require any additional sort stages.
- runQueryAsCommand(fromjson("{filter: {p: 5}, sort: {x: 1, y: -1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {p: 5}, sort: {x: 1, y: -1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {p: {$eq: 5}}, node: {ixscan: {pattern: {x: 1, y: -1, z: 'hashed', a: "
@@ -696,8 +704,8 @@ TEST_F(QueryPlannerHashedTest, SortWithMissingOrIrrelevantQueryPredicate) {
// Verify that a sort on non-hashed fields doesn't require any additional sort stages. The
// entire operation can be answered by the index. Also verify that if the projection includes
// hashed fields, the operation will require a fetch stage.
- runQueryAsCommand(
- fromjson("{filter: {}, sort: {x: 1, y: -1}, projection: {_id: 0, x: 1, z: 1}}"));
+ runQueryAsCommand(fromjson(
+ "{find: 'test', filter: {}, sort: {x: 1, y: -1}, projection: {_id: 0, x: 1, z: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{proj: {spec: {_id: 0, x: 1, z: 1}, node: {fetch: {filter: null, node: {ixscan: "
@@ -794,7 +802,7 @@ TEST_F(QueryPlannerHashedTest, ChooseHashedIndexHint) {
<< "y" << 1));
addIndex(BSON("x" << 1));
- runQueryAsCommand(fromjson("{filter: {x: 3}, hint: {x: 'hashed', y: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 3}, hint: {x: 'hashed', y: 1}}"));
assertNumSolutions(1U);
@@ -808,7 +816,8 @@ TEST_F(QueryPlannerHashedTest, ChooseHashedIndexHintWithOr) {
addIndex(BSON("y" << 1));
- runQueryAsCommand(fromjson("{filter: {$or: [{x: 1}, {y: 1}]}, hint: {x: 'hashed', y: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {$or: [{x: 1}, {y: 1}]}, hint: {x: 'hashed', y: 1}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {$or: [{x: 1}, {y: 1}]}, node: {ixscan: {pattern: {x: 'hashed', y: 1}, "
@@ -925,7 +934,7 @@ TEST_F(QueryPlannerHashedTest, BasicSkip) {
<< "hashed"
<< "y" << 1));
- runQueryAsCommand(fromjson("{filter: {x: 5}, skip: 8}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 5}, skip: 8}"));
assertNumSolutions(1U);
// Verify that the plan has 'skip' stage and uses index.
@@ -941,7 +950,7 @@ TEST_F(QueryPlannerHashedTest, BasicLimit) {
<< "hashed"
<< "y" << 1));
- runQueryAsCommand(fromjson("{filter: {x: 5}, limit: 5}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {x: 5}, limit: 5}"));
assertNumSolutions(1U);
// Verify that the plan has 'limit' stage and uses index.
@@ -971,9 +980,9 @@ TEST_F(QueryPlannerHashedTest, MinMaxParameter) {
fromjson("{x: 1}") // max.
);
- runQueryAsCommand(
- fromjson("{filter: {x: 5}, hint: {x: 'hashed', y: 1}, min: {x: NumberLong(1), y: 2}, "
- "max: {x: NumberLong(2), y: 2}}"));
+ runQueryAsCommand(fromjson(
+ "{find: 'test', filter: {x: 5}, hint: {x: 'hashed', y: 1}, min: {x: NumberLong(1), y: 2}, "
+ "max: {x: NumberLong(2), y: 2}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {x: {$eq: 5}}, node: {ixscan: {filter: null, pattern: {x: 'hashed', y: "
@@ -993,9 +1002,9 @@ TEST_F(QueryPlannerHashedTest, MinMaxParameter) {
fromjson("{x: 1}") // max.
);
- runQueryAsCommand(
- fromjson("{filter: {x: 5}, hint: {x: 1, y: 'hashed'}, min: {x: NumberLong(1), y: 2}, "
- "max: {x: NumberLong(2), y: 2}}"));
+ runQueryAsCommand(fromjson(
+ "{find: 'test', filter: {x: 5}, hint: {x: 1, y: 'hashed'}, min: {x: NumberLong(1), y: 2}, "
+ "max: {x: NumberLong(2), y: 2}}"));
assertNumSolutions(1U);
assertSolutionExists(
"{fetch: {filter: {x : {$eq: 5}}, node: {ixscan: {filter: null, pattern: {x: 1, y: "
diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp
index 6307f9cc9d3..9d2929d2b33 100644
--- a/src/mongo/db/query/query_planner_test_fixture.cpp
+++ b/src/mongo/db/query/query_planner_test_fixture.cpp
@@ -332,7 +332,7 @@ void QueryPlannerTest::runQueryFull(const BSONObj& query,
if (ntoreturn < 0) {
ASSERT_NE(ntoreturn, std::numeric_limits<long long>::min());
ntoreturn = -ntoreturn;
- qr->setWantMore(false);
+ qr->setSingleBatchField(true);
}
qr->setNToReturn(ntoreturn);
}
@@ -412,7 +412,7 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
if (ntoreturn < 0) {
ASSERT_NE(ntoreturn, std::numeric_limits<long long>::min());
ntoreturn = -ntoreturn;
- qr->setWantMore(false);
+ qr->setSingleBatchField(true);
}
qr->setNToReturn(ntoreturn);
}
@@ -433,14 +433,17 @@ void QueryPlannerTest::runInvalidQueryFull(const BSONObj& query,
ASSERT_NOT_OK(plannerStatus);
}
+
void QueryPlannerTest::runQueryAsCommand(const BSONObj& cmdObj) {
clearState();
invariant(nss.isValid());
const bool isExplain = false;
- std::unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // If there is no '$db', append it.
+ auto cmd = OpMsgRequest::fromDBAndBody(nss.db(), cmdObj).body;
+ std::unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmd, isExplain, nss));
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
@@ -462,8 +465,10 @@ void QueryPlannerTest::runInvalidQueryAsCommand(const BSONObj& cmdObj) {
invariant(nss.isValid());
const bool isExplain = false;
- std::unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+
+ // If there is no '$db', append it.
+ auto cmd = OpMsgRequest::fromDBAndBody(nss.db(), cmdObj).body;
+ std::unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmd, isExplain, nss));
auto statusWithCQ =
CanonicalQuery::canonicalize(opCtx.get(),
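
Because the IDL parser insists on a '$db' element, the fixture now routes raw command objects through OpMsgRequest::fromDBAndBody() before handing them to makeFromFindCommand(). The same wrapping works for any hand-built find command in tests; a minimal sketch with hypothetical values:

    NamespaceString nss("test.testns");
    BSONObj raw = fromjson("{find: 'testns', filter: {a: 1}}");

    // fromDBAndBody() attaches a '$db' element to the body (the fixture's command
    // objects are written without one).
    BSONObj cmd = OpMsgRequest::fromDBAndBody(nss.db(), raw).body;

    auto qr = QueryRequest::makeFromFindCommand(cmd, false /* isExplain */, nss);
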
diff --git a/src/mongo/db/query/query_planner_tree_test.cpp b/src/mongo/db/query/query_planner_tree_test.cpp
index 2883079a84f..bdc43343f30 100644
--- a/src/mongo/db/query/query_planner_tree_test.cpp
+++ b/src/mongo/db/query/query_planner_tree_test.cpp
@@ -1188,7 +1188,8 @@ TEST_F(QueryPlannerTest, CompoundAndNonCompoundIndices) {
//
TEST_F(QueryPlannerTest, SortOrderOnEqualitiesDoesNotMatter) {
addIndex(BSON("a" << 1 << "b" << 1 << "c" << 1));
- runQueryAsCommand(fromjson("{filter: {a: 1, b: 'b'}, sort:{a: -1, b: -1, c: 1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'test', filter: {a: 1, b: 'b'}, sort:{a: -1, b: -1, c: 1}}"));
ASSERT_EQUALS(getNumSolutions(), 2U);
// Verify that the solution doesn't require a sort stage.
@@ -1200,7 +1201,7 @@ TEST_F(QueryPlannerTest, SortOrderOnEqualitiesDoesNotMatter) {
TEST_F(QueryPlannerTest, NonIndexEqualitiesNotProvided) {
addIndex(BSON("a" << 1));
- runQueryAsCommand(fromjson("{filter: {a: 1, b: 1}, sort:{a: 1, b: 1}}"));
+ runQueryAsCommand(fromjson("{find: 'test', filter: {a: 1, b: 1}, sort:{a: 1, b: 1}}"));
ASSERT_EQUALS(getNumSolutions(), 2U);
// Verify that we use 'sort' stage because 'b' is not part of the index.
diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
index 9d0b5b2df06..7ebcafb4c21 100644
--- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp
+++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp
@@ -1845,7 +1845,8 @@ TEST_F(QueryPlannerWildcardTest, CanProduceSortMergePlanWithWildcardIndex) {
internalQueryEnumerationMaxOrSolutions.store(5);
addWildcardIndex(BSON("$**" << 1));
addIndex(BSON("a" << 1 << "b" << 1));
- runQueryAsCommand(fromjson("{filter: {$or: [{a: 1, b: 1}, {b: 2}]}, sort: {b: -1}}"));
+ runQueryAsCommand(
+ fromjson("{find: 'testns', filter: {$or: [{a: 1, b: 1}, {b: 2}]}, sort: {b: -1}}"));
assertNumSolutions(3U);
assertSolutionExists(
"{fetch: {filter: null, node: {mergeSort: {nodes: ["
diff --git a/src/mongo/db/query/query_request.cpp b/src/mongo/db/query/query_request.cpp
index c2c4e2c73b7..90c7bf0fcf6 100644
--- a/src/mongo/db/query/query_request.cpp
+++ b/src/mongo/db/query/query_request.cpp
@@ -43,362 +43,54 @@
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/idl/command_generic_argument.h"
#include "mongo/util/assert_util.h"
+#include "mongo/util/stacktrace.h"
#include "mongo/util/str.h"
namespace mongo {
-namespace {
-
-Status checkFieldType(const BSONElement& el, BSONType type) {
- if (type != el.type()) {
- str::stream ss;
- ss << "Failed to parse: " << el.toString() << ". "
- << "'" << el.fieldName() << "' field must be of BSON type " << typeName(type) << ".";
- return Status(ErrorCodes::FailedToParse, ss);
+QueryRequest::QueryRequest(NamespaceStringOrUUID nssOrUuid, bool preferNssForSerialization)
+ : _findCommand(std::move(nssOrUuid)) {
+ if (preferNssForSerialization) {
+ _findCommand.getNamespaceOrUUID().preferNssForSerialization();
}
-
- return Status::OK();
}
-
-} // namespace
-
-QueryRequest::QueryRequest(NamespaceStringOrUUID nssOrUuid)
- : _nss(nssOrUuid.nss() ? *nssOrUuid.nss() : NamespaceString()), _uuid(nssOrUuid.uuid()) {}
+QueryRequest::QueryRequest(FindCommand findCommand) : _findCommand(std::move(findCommand)) {
+ _findCommand.getNamespaceOrUUID().preferNssForSerialization();
+}
void QueryRequest::refreshNSS(const NamespaceString& nss) {
- if (_uuid) {
- _nss = nss;
+ if (_findCommand.getNamespaceOrUUID().uuid()) {
+ auto& nssOrUUID = _findCommand.getNamespaceOrUUID();
+ nssOrUUID.setNss(nss);
}
- invariant(!_nss.isEmpty());
+ invariant(_findCommand.getNamespaceOrUUID().nss());
}
// static
-StatusWith<std::unique_ptr<QueryRequest>> QueryRequest::parseFromFindCommand(
- std::unique_ptr<QueryRequest> qr, const BSONObj& cmdObj, bool isExplain) {
- qr->_explain = isExplain;
- bool tailable = false;
- bool awaitData = false;
-
- // Parse the command BSON by looping through one element at a time.
- BSONObjIterator it(cmdObj);
- while (it.more()) {
- BSONElement el = it.next();
- const auto fieldName = el.fieldNameStringData();
- if (fieldName == kFindCommandName) {
- // Check both UUID and String types for "find" field.
- Status status = checkFieldType(el, BinData);
- if (!status.isOK()) {
- status = checkFieldType(el, String);
- }
- if (!status.isOK()) {
- return status;
- }
- } else if (fieldName == kFilterField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_filter = el.Obj().getOwned();
- } else if (fieldName == kProjectionField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_proj = el.Obj().getOwned();
- } else if (fieldName == kSortField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_sort = el.Obj().getOwned();
- } else if (fieldName == kHintField) {
- BSONObj hintObj;
- if (Object == el.type()) {
- hintObj = cmdObj["hint"].Obj().getOwned();
- } else if (String == el.type()) {
- hintObj = el.wrap("$hint");
- } else {
- return Status(ErrorCodes::FailedToParse,
- "hint must be either a string or nested object");
- }
-
- qr->_hint = hintObj;
- } else if (fieldName == repl::ReadConcernArgs::kReadConcernFieldName) {
- // Read concern parsing is handled elsewhere, but we store a copy here.
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_readConcern = el.Obj().getOwned();
- } else if (fieldName == QueryRequest::kUnwrappedReadPrefField) {
- // Read preference parsing is handled elsewhere, but we store a copy here.
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->setUnwrappedReadPref(el.Obj());
- } else if (fieldName == kCollationField) {
- // Collation parsing is handled elsewhere, but we store a copy here.
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_collation = el.Obj().getOwned();
- } else if (fieldName == kSkipField) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'skip' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
-
- long long skip = el.numberLong();
-
- // A skip value of 0 means that there is no skip.
- if (skip) {
- qr->_skip = skip;
- }
- } else if (fieldName == kLimitField) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'limit' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
-
- long long limit = el.numberLong();
-
- // A limit value of 0 means that there is no limit.
- if (limit) {
- qr->_limit = limit;
- }
- } else if (fieldName == kBatchSizeField) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'batchSize' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
-
- qr->_batchSize = el.numberLong();
- } else if (fieldName == kNToReturnField) {
- if (!el.isNumber()) {
- str::stream ss;
- ss << "Failed to parse: " << cmdObj.toString() << ". "
- << "'ntoreturn' field must be numeric.";
- return Status(ErrorCodes::FailedToParse, ss);
- }
-
- qr->_ntoreturn = el.numberLong();
- } else if (fieldName == kSingleBatchField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_wantMore = !el.boolean();
- } else if (fieldName == kAllowDiskUseField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_allowDiskUse = el.boolean();
- } else if (fieldName == cmdOptionMaxTimeMS) {
- StatusWith<int> maxTimeMS = parseMaxTimeMS(el);
- if (!maxTimeMS.isOK()) {
- return maxTimeMS.getStatus();
- }
-
- qr->_maxTimeMS = maxTimeMS.getValue();
- } else if (fieldName == kMinField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_min = el.Obj().getOwned();
- } else if (fieldName == kMaxField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_max = el.Obj().getOwned();
- } else if (fieldName == kReturnKeyField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_returnKey = el.boolean();
- } else if (fieldName == kShowRecordIdField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_showRecordId = el.boolean();
- } else if (fieldName == kTailableField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- tailable = el.boolean();
- } else if (fieldName == kOplogReplayField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- // Ignore the 'oplogReplay' field for compatibility with old clients. Nodes 4.4 and
- // greater will apply the 'oplogReplay' optimization to eligible oplog scans regardless
- // of whether the flag is set explicitly, so the flag is no longer meaningful.
- } else if (fieldName == kNoCursorTimeoutField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_noCursorTimeout = el.boolean();
- } else if (fieldName == kAwaitDataField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- awaitData = el.boolean();
- } else if (fieldName == kPartialResultsField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
-
- qr->_allowPartialResults = el.boolean();
- } else if (fieldName == kLegacyRuntimeConstantsField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- qr->_legacyRuntimeConstants =
- LegacyRuntimeConstants::parse(IDLParserErrorContext(kLegacyRuntimeConstantsField),
- cmdObj.getObjectField(kLegacyRuntimeConstantsField));
- } else if (fieldName == kLetField) {
- if (auto status = checkFieldType(el, Object); !status.isOK())
- return status;
- qr->_letParameters = el.Obj().getOwned();
- } else if (fieldName == kOptionsField) {
- // 3.0.x versions of the shell may generate an explain of a find command with an
- // 'options' field. We accept this only if the 'options' field is empty so that
- // the shell's explain implementation is forwards compatible.
- //
- // TODO: Remove for 3.4.
- if (!qr->isExplain()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Field '" << kOptionsField
- << "' is only allowed for explain.");
- }
-
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
-
- BSONObj optionsObj = el.Obj();
- if (!optionsObj.isEmpty()) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse options: " << optionsObj.toString()
- << ". You may need to update your shell or driver.");
- }
- } else if (fieldName == kShardVersionField) {
- // Shard version parsing is handled elsewhere.
- } else if (fieldName == kTermField) {
- Status status = checkFieldType(el, NumberLong);
- if (!status.isOK()) {
- return status;
- }
- qr->_replicationTerm = el._numberLong();
- } else if (fieldName == kReadOnceField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
+std::unique_ptr<QueryRequest> QueryRequest::makeFromFindCommand(
+ const BSONObj& cmdObj, bool isExplain, boost::optional<NamespaceString> nss) {
+ auto qr = std::make_unique<QueryRequest>(
+ FindCommand::parse(IDLParserErrorContext("FindCommand"), cmdObj));
- qr->_readOnce = el.boolean();
- } else if (fieldName == kAllowSpeculativeMajorityReadField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- qr->_allowSpeculativeMajorityRead = el.boolean();
- } else if (fieldName == kResumeAfterField) {
- Status status = checkFieldType(el, Object);
- if (!status.isOK()) {
- return status;
- }
- qr->_resumeAfter = el.embeddedObject();
- } else if (fieldName == kRequestResumeTokenField) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- qr->_requestResumeToken = el.boolean();
- } else if (fieldName == kUse44SortKeys) {
- Status status = checkFieldType(el, Bool);
- if (!status.isOK()) {
- return status;
- }
- } else if (isMongocryptdArgument(fieldName)) {
- return Status(ErrorCodes::FailedToParse,
- str::stream()
- << "Failed to parse: " << cmdObj.toString()
- << ". Unrecognized field '" << fieldName
- << "'. This command may be meant for a mongocryptd process.");
-
- // TODO SERVER-47065: A 4.7+ node still has to accept the '_use44SortKeys' field, since
- // it could be included in a command sent from a 4.4 mongos. When 5.0 becomes last-lts,
- // this code to tolerate the '_use44SortKeys' field can be deleted.
- } else if (!isGenericArgument(fieldName)) {
- return Status(ErrorCodes::FailedToParse,
- str::stream() << "Failed to parse: " << cmdObj.toString() << ". "
- << "Unrecognized field '" << fieldName << "'.");
- }
+    // If an explicit namespace is specified, overwrite the parsed one.
+ if (nss) {
+ qr->setNSS(*nss);
}
- auto tailableMode = tailableModeFromBools(tailable, awaitData);
- if (!tailableMode.isOK()) {
- return tailableMode.getStatus();
- }
- qr->_tailableMode = tailableMode.getValue();
+ qr->_tailableMode =
+ uassertStatusOK(tailableModeFromBools(qr->getTailable(), qr->getAwaitData()));
+
+ qr->_explain = isExplain;
qr->addMetaProjection();
- Status validateStatus = qr->validate();
- if (!validateStatus.isOK()) {
- return validateStatus;
+ if (qr->getSkip() && *qr->getSkip() == 0) {
+ qr->setSkip(boost::none);
}
-
- return std::move(qr);
-}
-
-StatusWith<std::unique_ptr<QueryRequest>> QueryRequest::makeFromFindCommand(NamespaceString nss,
- const BSONObj& cmdObj,
- bool isExplain) {
- BSONElement first = cmdObj.firstElement();
- if (first.type() == BinData && first.binDataType() == BinDataType::newUUID) {
- auto uuid = uassertStatusOK(UUID::parse(first));
- auto qr = std::make_unique<QueryRequest>(NamespaceStringOrUUID(nss.db().toString(), uuid));
- return parseFromFindCommand(std::move(qr), cmdObj, isExplain);
- } else {
- auto qr = std::make_unique<QueryRequest>(nss);
- return parseFromFindCommand(std::move(qr), cmdObj, isExplain);
+ if (qr->getLimit() && *qr->getLimit() == 0) {
+ qr->setLimit(boost::none);
}
+ uassertStatusOK(qr->validate());
+ return qr;
}
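
For reference, a minimal usage sketch of the parse path above, following the conventions of the updated unit tests; the helper name parseExampleFind and the header choices are illustrative assumptions, while makeFromFindCommand(), getSkip(), and getLimit() follow the signatures shown in this change.

#include <memory>

#include "mongo/db/json.h"
#include "mongo/db/query/query_request.h"
#include "mongo/util/assert_util.h"

namespace mongo {
// Sketch: parse a find command through the IDL-backed path added above.
std::unique_ptr<QueryRequest> parseExampleFind() {
    // The '$db' field mirrors the updated unit tests, which now include it.
    BSONObj cmdObj = fromjson(
        "{find: 'testcoll', filter: {a: 1}, skip: 0, limit: 0, '$db': 'testdb'}");
    // Malformed input now throws a DBException (e.g. TypeMismatch or BadValue)
    // rather than surfacing as a StatusWith error.
    auto qr = QueryRequest::makeFromFindCommand(cmdObj, false /* isExplain */);
    // A client-supplied skip or limit of 0 is normalized to "unset".
    invariant(!qr->getSkip());
    invariant(!qr->getLimit());
    return qr;
}
}  // namespace mongo
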
BSONObj QueryRequest::asFindCommand() const {
@@ -407,236 +99,105 @@ BSONObj QueryRequest::asFindCommand() const {
return bob.obj();
}
-BSONObj QueryRequest::asFindCommandWithUuid() const {
- BSONObjBuilder bob;
- asFindCommandWithUuid(&bob);
- return bob.obj();
-}
-
void QueryRequest::asFindCommand(BSONObjBuilder* cmdBuilder) const {
- cmdBuilder->append(kFindCommandName, _nss.coll());
- asFindCommandInternal(cmdBuilder);
-}
-
-void QueryRequest::asFindCommandWithUuid(BSONObjBuilder* cmdBuilder) const {
- invariant(_uuid);
- _uuid->appendToBuilder(cmdBuilder, kFindCommandName);
- asFindCommandInternal(cmdBuilder);
-}
-
-void QueryRequest::asFindCommandInternal(BSONObjBuilder* cmdBuilder) const {
- if (!_filter.isEmpty()) {
- cmdBuilder->append(kFilterField, _filter);
- }
-
- if (!_proj.isEmpty()) {
- cmdBuilder->append(kProjectionField, _proj);
- }
-
- if (!_sort.isEmpty()) {
- cmdBuilder->append(kSortField, _sort);
- }
-
- if (!_hint.isEmpty()) {
- cmdBuilder->append(kHintField, _hint);
- }
-
- if (_readConcern) {
- cmdBuilder->append(repl::ReadConcernArgs::kReadConcernFieldName, *_readConcern);
- }
-
- if (!_collation.isEmpty()) {
- cmdBuilder->append(kCollationField, _collation);
- }
-
- if (_skip) {
- cmdBuilder->append(kSkipField, *_skip);
- }
-
- if (_ntoreturn) {
- cmdBuilder->append(kNToReturnField, *_ntoreturn);
- }
-
- if (_limit) {
- cmdBuilder->append(kLimitField, *_limit);
- }
-
- if (_allowDiskUse) {
- cmdBuilder->append(kAllowDiskUseField, true);
- }
-
- if (_batchSize) {
- cmdBuilder->append(kBatchSizeField, *_batchSize);
- }
-
- if (!_wantMore) {
- cmdBuilder->append(kSingleBatchField, true);
- }
-
- if (_maxTimeMS > 0) {
- cmdBuilder->append(cmdOptionMaxTimeMS, _maxTimeMS);
- }
-
- if (!_max.isEmpty()) {
- cmdBuilder->append(kMaxField, _max);
- }
-
- if (!_min.isEmpty()) {
- cmdBuilder->append(kMinField, _min);
- }
-
- if (_returnKey) {
- cmdBuilder->append(kReturnKeyField, true);
- }
-
- if (_showRecordId) {
- cmdBuilder->append(kShowRecordIdField, true);
- }
-
- switch (_tailableMode) {
- case TailableModeEnum::kTailable: {
- cmdBuilder->append(kTailableField, true);
- break;
- }
- case TailableModeEnum::kTailableAndAwaitData: {
- cmdBuilder->append(kTailableField, true);
- cmdBuilder->append(kAwaitDataField, true);
- break;
- }
- case TailableModeEnum::kNormal: {
- break;
- }
- }
-
- if (_noCursorTimeout) {
- cmdBuilder->append(kNoCursorTimeoutField, true);
- }
-
- if (_allowPartialResults) {
- cmdBuilder->append(kPartialResultsField, true);
- }
-
- if (_legacyRuntimeConstants) {
- BSONObjBuilder rtcBuilder(cmdBuilder->subobjStart(kLegacyRuntimeConstantsField));
- _legacyRuntimeConstants->serialize(&rtcBuilder);
- rtcBuilder.doneFast();
- }
-
- if (_letParameters) {
- cmdBuilder->append(kLetField, *_letParameters);
- }
-
- if (_replicationTerm) {
- cmdBuilder->append(kTermField, *_replicationTerm);
- }
-
- if (_readOnce) {
- cmdBuilder->append(kReadOnceField, true);
- }
-
- if (_allowSpeculativeMajorityRead) {
- cmdBuilder->append(kAllowSpeculativeMajorityReadField, true);
- }
-
- if (_requestResumeToken) {
- cmdBuilder->append(kRequestResumeTokenField, _requestResumeToken);
- }
-
- if (!_resumeAfter.isEmpty()) {
- cmdBuilder->append(kResumeAfterField, _resumeAfter);
- }
+ _findCommand.serialize(BSONObj(), cmdBuilder);
}
void QueryRequest::addShowRecordIdMetaProj() {
- if (_proj["$recordId"]) {
+ if (getProj()["$recordId"]) {
// There's already some projection on $recordId. Don't overwrite it.
return;
}
BSONObjBuilder projBob;
- projBob.appendElements(_proj);
+ projBob.appendElements(getProj());
BSONObj metaRecordId = BSON("$recordId" << BSON("$meta" << QueryRequest::metaRecordId));
projBob.append(metaRecordId.firstElement());
- _proj = projBob.obj();
+ setProj(projBob.obj());
}
Status QueryRequest::validate() const {
// Min and Max objects must have the same fields.
- if (!_min.isEmpty() && !_max.isEmpty()) {
- if (!_min.isFieldNamePrefixOf(_max) || (_min.nFields() != _max.nFields())) {
+ if (!getMin().isEmpty() && !getMax().isEmpty()) {
+ if (!getMin().isFieldNamePrefixOf(getMax()) || (getMin().nFields() != getMax().nFields())) {
return Status(ErrorCodes::Error(51176), "min and max must have the same field names");
}
}
- if ((_limit || _batchSize) && _ntoreturn) {
+ if ((getLimit() || getBatchSize()) && getNToReturn()) {
return Status(ErrorCodes::BadValue,
"'limit' or 'batchSize' fields can not be set with 'ntoreturn' field.");
}
- if (_skip && *_skip < 0) {
+    // TODO SERVER-53060: Once the legacy query request is separated out, these validations can
+    // be moved into the IDL.
+ if (getSkip() && *getSkip() < 0) {
return Status(ErrorCodes::BadValue,
- str::stream() << "Skip value must be non-negative, but received: " << *_skip);
+ str::stream()
+ << "Skip value must be non-negative, but received: " << *getSkip());
}
- if (_limit && *_limit < 0) {
+ if (getLimit() && *getLimit() < 0) {
return Status(ErrorCodes::BadValue,
str::stream()
- << "Limit value must be non-negative, but received: " << *_limit);
+ << "Limit value must be non-negative, but received: " << *getLimit());
}
- if (_batchSize && *_batchSize < 0) {
+ if (getBatchSize() && *getBatchSize() < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "BatchSize value must be non-negative, but received: " << *_batchSize);
+ str::stream() << "BatchSize value must be non-negative, but received: "
+ << *getBatchSize());
}
- if (_ntoreturn && *_ntoreturn < 0) {
+ if (getNToReturn() && *getNToReturn() < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "NToReturn value must be non-negative, but received: " << *_ntoreturn);
+ str::stream() << "NToReturn value must be non-negative, but received: "
+ << *getNToReturn());
}
- if (_maxTimeMS < 0) {
+ if (getMaxTimeMS() < 0) {
return Status(ErrorCodes::BadValue,
- str::stream()
- << "MaxTimeMS value must be non-negative, but received: " << _maxTimeMS);
+ str::stream() << "MaxTimeMS value must be non-negative, but received: "
+ << getMaxTimeMS());
}
if (_tailableMode != TailableModeEnum::kNormal) {
// Tailable cursors cannot have any sort other than {$natural: 1}.
const BSONObj expectedSort = BSON(kNaturalSortField << 1);
- if (!_sort.isEmpty() &&
- SimpleBSONObjComparator::kInstance.evaluate(_sort != expectedSort)) {
+ if (!getSort().isEmpty() &&
+ SimpleBSONObjComparator::kInstance.evaluate(getSort() != expectedSort)) {
return Status(ErrorCodes::BadValue,
"cannot use tailable option with a sort other than {$natural: 1}");
}
// Cannot indicate that you want a 'singleBatch' if the cursor is tailable.
- if (!_wantMore) {
+ if (isSingleBatch()) {
return Status(ErrorCodes::BadValue,
"cannot use tailable option with the 'singleBatch' option");
}
}
- if (_requestResumeToken) {
- if (SimpleBSONObjComparator::kInstance.evaluate(_hint != BSON(kNaturalSortField << 1))) {
+ if (getRequestResumeToken()) {
+ if (SimpleBSONObjComparator::kInstance.evaluate(getHint() !=
+ BSON(kNaturalSortField << 1))) {
return Status(ErrorCodes::BadValue,
"hint must be {$natural:1} if 'requestResumeToken' is enabled");
}
- if (!_sort.isEmpty() &&
- SimpleBSONObjComparator::kInstance.evaluate(_sort != BSON(kNaturalSortField << 1))) {
+ if (!getSort().isEmpty() &&
+ SimpleBSONObjComparator::kInstance.evaluate(getSort() !=
+ BSON(kNaturalSortField << 1))) {
return Status(ErrorCodes::BadValue,
"sort must be unset or {$natural:1} if 'requestResumeToken' is enabled");
}
- if (!_resumeAfter.isEmpty()) {
- if (_resumeAfter.nFields() != 1 ||
- _resumeAfter["$recordId"].type() != BSONType::NumberLong) {
+ if (!getResumeAfter().isEmpty()) {
+ if (getResumeAfter().nFields() != 1 ||
+ getResumeAfter()["$recordId"].type() != BSONType::NumberLong) {
return Status(ErrorCodes::BadValue,
"Malformed resume token: the '_resumeAfter' object must contain"
" exactly one field named '$recordId', of type NumberLong.");
}
}
- } else if (!_resumeAfter.isEmpty()) {
+ } else if (!getResumeAfter().isEmpty()) {
return Status(ErrorCodes::BadValue,
"'requestResumeToken' must be true if 'resumeAfter' is"
" specified");
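
To make the constraints above concrete, a hedged sketch of a request shape that passes this validation; the collection name, database name, and record id are placeholder values.

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {
// Sketch: a resumable find request satisfying validate() above. 'hint' and
// 'sort' are limited to {$natural: 1}, and '$_resumeAfter' must contain
// exactly one NumberLong field named '$recordId'.
BSONObj makeResumableFindCmd() {
    return BSON("find"
                << "testcoll"
                << "hint" << BSON("$natural" << 1) << "$_requestResumeToken" << true
                << "$_resumeAfter" << BSON("$recordId" << 42LL) << "$db"
                << "testdb");
}
}  // namespace mongo
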
@@ -673,6 +234,10 @@ StatusWith<int> QueryRequest::parseMaxTimeMS(BSONElement maxTimeMSElt) {
return StatusWith<int>(static_cast<int>(maxTimeMSLongLong));
}
+int32_t QueryRequest::parseMaxTimeMSForIDL(BSONElement maxTimeMSElt) {
+ return static_cast<int32_t>(uassertStatusOK(QueryRequest::parseMaxTimeMS(maxTimeMSElt)));
+}
+
bool QueryRequest::isTextScoreMeta(BSONElement elt) {
// elt must be foo: {$meta: "textScore"}
if (mongo::Object != elt.type()) {
@@ -725,7 +290,8 @@ StatusWith<std::unique_ptr<QueryRequest>> QueryRequest::fromLegacyQuery(
int ntoskip,
int ntoreturn,
int queryOptions) {
- auto qr = std::make_unique<QueryRequest>(nsOrUuid);
+    // Legacy commands should prefer to serialize with the UUID rather than the namespace.
+ auto qr = std::make_unique<QueryRequest>(std::move(nsOrUuid), false);
Status status = qr->init(ntoskip, ntoreturn, queryOptions, queryObj, proj, true);
if (!status.isOK()) {
@@ -741,10 +307,11 @@ Status QueryRequest::init(int ntoskip,
const BSONObj& queryObj,
const BSONObj& proj,
bool fromQueryMessage) {
- _proj = proj.getOwned();
-
+ if (!proj.isEmpty()) {
+ _findCommand.setProjection(proj.getOwned());
+ }
if (ntoskip) {
- _skip = ntoskip;
+ _findCommand.setSkip(ntoskip);
}
if (ntoreturn) {
@@ -753,16 +320,16 @@ Status QueryRequest::init(int ntoskip,
// ntoreturn is negative but can't be negated.
return Status(ErrorCodes::BadValue, "bad ntoreturn value in query");
}
- _ntoreturn = -ntoreturn;
- _wantMore = false;
+ _findCommand.setNtoreturn(-ntoreturn);
+ setSingleBatchField(true);
} else {
- _ntoreturn = ntoreturn;
+ _findCommand.setNtoreturn(ntoreturn);
}
}
// An ntoreturn of 1 is special because it also means to return at most one batch.
- if (_ntoreturn.value_or(0) == 1) {
- _wantMore = false;
+ if (getNToReturn().value_or(0) == 1) {
+ setSingleBatchField(true);
}
// Initialize flags passed as 'queryOptions' bit vector.
@@ -774,21 +341,21 @@ Status QueryRequest::init(int ntoskip,
queryField = queryObj["$query"];
}
if (queryField.isABSONObj()) {
- _filter = queryField.embeddedObject().getOwned();
+ _findCommand.setFilter(queryField.embeddedObject().getOwned());
Status status = initFullQuery(queryObj);
if (!status.isOK()) {
return status;
}
} else {
- _filter = queryObj.getOwned();
+ _findCommand.setFilter(queryObj.getOwned());
}
// It's not possible to specify readConcern in a legacy query message, so initialize it to
    // an empty readConcern object, i.e. equivalent to `readConcern: {}`. This ensures that
// mongos passes this empty readConcern to shards.
- _readConcern = BSONObj();
+ setReadConcern(BSONObj());
} else {
// This is the debugging code path.
- _filter = queryObj.getOwned();
+ _findCommand.setFilter(queryObj.getOwned());
}
_hasReadPref = queryObj.hasField("$readPreference");
@@ -805,9 +372,9 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
if (name == "$orderby" || name == "orderby") {
if (Object == e.type()) {
- _sort = e.embeddedObject().getOwned();
+ setSort(e.embeddedObject().getOwned());
} else if (Array == e.type()) {
- _sort = e.embeddedObject();
+ setSort(e.embeddedObject());
// TODO: Is this ever used? I don't think so.
// Quote:
@@ -819,7 +386,7 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
char p[2] = "0";
while (1) {
- BSONObj j = _sort.getObjectField(p);
+ BSONObj j = getSort().getObjectField(p);
if (j.isEmpty()) {
break;
}
@@ -837,7 +404,7 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
}
}
- _sort = b.obj();
+ setSort(b.obj());
} else {
return Status(ErrorCodes::BadValue, "sort must be object or array");
}
@@ -850,17 +417,17 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
if (!e.isABSONObj()) {
return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
}
- _min = e.embeddedObject().getOwned();
+ setMin(e.embeddedObject().getOwned());
} else if (name == "max") {
if (!e.isABSONObj()) {
return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
}
- _max = e.embeddedObject().getOwned();
+ setMax(e.embeddedObject().getOwned());
} else if (name == "hint") {
if (e.isABSONObj()) {
- _hint = e.embeddedObject().getOwned();
+ setHint(e.embeddedObject().getOwned());
} else if (String == e.type()) {
- _hint = e.wrap();
+ setHint(e.wrap());
} else {
return Status(ErrorCodes::BadValue,
"$hint must be either a string or nested object");
@@ -868,12 +435,12 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
} else if (name == "returnKey") {
// Won't throw.
if (e.trueValue()) {
- _returnKey = true;
+ setReturnKey(true);
}
} else if (name == "showDiskLoc") {
// Won't throw.
if (e.trueValue()) {
- _showRecordId = true;
+ setShowRecordId(true);
addShowRecordIdMetaProj();
}
} else if (name == "maxTimeMS") {
@@ -881,7 +448,7 @@ Status QueryRequest::initFullQuery(const BSONObj& top) {
if (!maxTimeMS.isOK()) {
return maxTimeMS.getStatus();
}
- _maxTimeMS = maxTimeMS.getValue();
+ setMaxTimeMS(maxTimeMS.getValue());
}
}
}
@@ -900,13 +467,13 @@ int QueryRequest::getOptions() const {
if (_slaveOk) {
options |= QueryOption_SecondaryOk;
}
- if (_noCursorTimeout) {
+ if (isNoCursorTimeout()) {
options |= QueryOption_NoCursorTimeout;
}
if (_exhaust) {
options |= QueryOption_Exhaust;
}
- if (_allowPartialResults) {
+ if (isAllowPartialResults()) {
options |= QueryOption_PartialResults;
}
return options;
@@ -915,11 +482,22 @@ int QueryRequest::getOptions() const {
void QueryRequest::initFromInt(int options) {
bool tailable = (options & QueryOption_CursorTailable) != 0;
bool awaitData = (options & QueryOption_AwaitData) != 0;
+ if (awaitData) {
+ _findCommand.setAwaitData(true);
+ }
+ if (tailable) {
+ _findCommand.setTailable(true);
+ }
_tailableMode = uassertStatusOK(tailableModeFromBools(tailable, awaitData));
_slaveOk = (options & QueryOption_SecondaryOk) != 0;
- _noCursorTimeout = (options & QueryOption_NoCursorTimeout) != 0;
_exhaust = (options & QueryOption_Exhaust) != 0;
- _allowPartialResults = (options & QueryOption_PartialResults) != 0;
+
+ if ((options & QueryOption_NoCursorTimeout) != 0) {
+ setNoCursorTimeout(true);
+ }
+ if ((options & QueryOption_PartialResults) != 0) {
+ setAllowPartialResults(true);
+ }
}
void QueryRequest::addMetaProjection() {
@@ -928,147 +506,155 @@ void QueryRequest::addMetaProjection() {
}
}
-boost::optional<long long> QueryRequest::getEffectiveBatchSize() const {
- return _batchSize ? _batchSize : _ntoreturn;
+boost::optional<int64_t> QueryRequest::getEffectiveBatchSize() const {
+ return getBatchSize() ? getBatchSize() : getNToReturn();
}
StatusWith<BSONObj> QueryRequest::asAggregationCommand() const {
BSONObjBuilder aggregationBuilder;
// First, check if this query has options that are not supported in aggregation.
- if (!_min.isEmpty()) {
+ if (!getMin().isEmpty()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kMinField << " not supported in aggregation."};
+ str::stream() << "Option " << FindCommand::kMinFieldName
+ << " not supported in aggregation."};
}
- if (!_max.isEmpty()) {
+ if (!getMax().isEmpty()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kMaxField << " not supported in aggregation."};
+ str::stream() << "Option " << FindCommand::kMaxFieldName
+ << " not supported in aggregation."};
}
- if (_returnKey) {
+ if (returnKey()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kReturnKeyField << " not supported in aggregation."};
+ str::stream() << "Option " << FindCommand::kReturnKeyFieldName
+ << " not supported in aggregation."};
}
- if (_showRecordId) {
+ if (showRecordId()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kShowRecordIdField
+ str::stream() << "Option " << FindCommand::kShowRecordIdFieldName
<< " not supported in aggregation."};
}
if (isTailable()) {
return {ErrorCodes::InvalidPipelineOperator,
"Tailable cursors are not supported in aggregation."};
}
- if (_noCursorTimeout) {
+ if (isNoCursorTimeout()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kNoCursorTimeoutField
+ str::stream() << "Option " << FindCommand::kNoCursorTimeoutFieldName
<< " not supported in aggregation."};
}
- if (_allowPartialResults) {
+ if (isAllowPartialResults()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kPartialResultsField
+ str::stream() << "Option " << FindCommand::kAllowPartialResultsFieldName
<< " not supported in aggregation."};
}
- if (_ntoreturn) {
+ if (getNToReturn()) {
return {ErrorCodes::BadValue,
str::stream() << "Cannot convert to an aggregation if ntoreturn is set."};
}
- if (_sort[kNaturalSortField]) {
+ if (getSort()[kNaturalSortField]) {
return {ErrorCodes::InvalidPipelineOperator,
str::stream() << "Sort option " << kNaturalSortField
<< " not supported in aggregation."};
}
// The aggregation command normally does not support the 'singleBatch' option, but we make a
// special exception if 'limit' is set to 1.
- if (!_wantMore && _limit.value_or(0) != 1LL) {
+ if (isSingleBatch() && getLimit().value_or(0) != 1LL) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kSingleBatchField
+ str::stream() << "Option " << FindCommand::kSingleBatchFieldName
<< " not supported in aggregation."};
}
- if (_readOnce) {
+ if (isReadOnce()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kReadOnceField << " not supported in aggregation."};
+ str::stream() << "Option " << FindCommand::kReadOnceFieldName
+ << " not supported in aggregation."};
}
- if (_allowSpeculativeMajorityRead) {
+ if (allowSpeculativeMajorityRead()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kAllowSpeculativeMajorityReadField
+ str::stream() << "Option " << FindCommand::kAllowSpeculativeMajorityReadFieldName
<< " not supported in aggregation."};
}
- if (_requestResumeToken) {
+ if (getRequestResumeToken()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kRequestResumeTokenField
+ str::stream() << "Option " << FindCommand::kRequestResumeTokenFieldName
<< " not supported in aggregation."};
}
- if (!_resumeAfter.isEmpty()) {
+ if (!getResumeAfter().isEmpty()) {
return {ErrorCodes::InvalidPipelineOperator,
- str::stream() << "Option " << kResumeAfterField
+ str::stream() << "Option " << FindCommand::kResumeAfterFieldName
<< " not supported in aggregation."};
}
// Now that we've successfully validated this QR, begin building the aggregation command.
- aggregationBuilder.append("aggregate", _nss.coll());
+ aggregationBuilder.append("aggregate",
+ _findCommand.getNamespaceOrUUID().nss()
+ ? _findCommand.getNamespaceOrUUID().nss()->coll()
+ : "");
// Construct an aggregation pipeline that finds the equivalent documents to this query request.
BSONArrayBuilder pipelineBuilder(aggregationBuilder.subarrayStart("pipeline"));
- if (!_filter.isEmpty()) {
+ if (!getFilter().isEmpty()) {
BSONObjBuilder matchBuilder(pipelineBuilder.subobjStart());
- matchBuilder.append("$match", _filter);
+ matchBuilder.append("$match", getFilter());
matchBuilder.doneFast();
}
- if (!_sort.isEmpty()) {
+ if (!getSort().isEmpty()) {
BSONObjBuilder sortBuilder(pipelineBuilder.subobjStart());
- sortBuilder.append("$sort", _sort);
+ sortBuilder.append("$sort", getSort());
sortBuilder.doneFast();
}
- if (_skip) {
+ if (getSkip()) {
BSONObjBuilder skipBuilder(pipelineBuilder.subobjStart());
- skipBuilder.append("$skip", *_skip);
+ skipBuilder.append("$skip", *getSkip());
skipBuilder.doneFast();
}
- if (_limit) {
+ if (getLimit()) {
BSONObjBuilder limitBuilder(pipelineBuilder.subobjStart());
- limitBuilder.append("$limit", *_limit);
+ limitBuilder.append("$limit", *getLimit());
limitBuilder.doneFast();
}
- if (!_proj.isEmpty()) {
+ if (!getProj().isEmpty()) {
BSONObjBuilder projectBuilder(pipelineBuilder.subobjStart());
- projectBuilder.append("$project", _proj);
+ projectBuilder.append("$project", getProj());
projectBuilder.doneFast();
}
pipelineBuilder.doneFast();
// The aggregation 'cursor' option is always set, regardless of the presence of batchSize.
BSONObjBuilder batchSizeBuilder(aggregationBuilder.subobjStart("cursor"));
- if (_batchSize) {
- batchSizeBuilder.append(kBatchSizeField, *_batchSize);
+ if (getBatchSize()) {
+ batchSizeBuilder.append(FindCommand::kBatchSizeFieldName, *getBatchSize());
}
batchSizeBuilder.doneFast();
// Other options.
- aggregationBuilder.append("collation", _collation);
- if (_maxTimeMS > 0) {
- aggregationBuilder.append(cmdOptionMaxTimeMS, _maxTimeMS);
+ aggregationBuilder.append("collation", getCollation());
+ if (getMaxTimeMS() > 0) {
+ aggregationBuilder.append(cmdOptionMaxTimeMS, getMaxTimeMS());
}
- if (!_hint.isEmpty()) {
- aggregationBuilder.append("hint", _hint);
+ if (!getHint().isEmpty()) {
+ aggregationBuilder.append(FindCommand::kHintFieldName, getHint());
}
- if (_readConcern) {
- aggregationBuilder.append("readConcern", *_readConcern);
+ if (getReadConcern()) {
+ aggregationBuilder.append("readConcern", *getReadConcern());
}
- if (!_unwrappedReadPref.isEmpty()) {
- aggregationBuilder.append(QueryRequest::kUnwrappedReadPrefField, _unwrappedReadPref);
+ if (!getUnwrappedReadPref().isEmpty()) {
+ aggregationBuilder.append(FindCommand::kUnwrappedReadPrefFieldName, getUnwrappedReadPref());
}
- if (_allowDiskUse) {
- aggregationBuilder.append(QueryRequest::kAllowDiskUseField, _allowDiskUse);
+ if (allowDiskUse()) {
+ aggregationBuilder.append(FindCommand::kAllowDiskUseFieldName, allowDiskUse());
}
- if (_legacyRuntimeConstants) {
- BSONObjBuilder rtcBuilder(aggregationBuilder.subobjStart(kLegacyRuntimeConstantsField));
- _legacyRuntimeConstants->serialize(&rtcBuilder);
+ if (getLegacyRuntimeConstants()) {
+ BSONObjBuilder rtcBuilder(
+ aggregationBuilder.subobjStart(FindCommand::kLegacyRuntimeConstantsFieldName));
+ getLegacyRuntimeConstants()->serialize(&rtcBuilder);
rtcBuilder.doneFast();
}
- if (_letParameters) {
- aggregationBuilder.append(QueryRequest::kLetField, *_letParameters);
+ if (getLetParameters()) {
+ aggregationBuilder.append(FindCommand::kLetFieldName, *getLetParameters());
}
return StatusWith<BSONObj>(aggregationBuilder.obj());
}
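
To illustrate how the conversion above is typically consumed, a brief hedged sketch; the wrapper name toAggregation and the commented output shape are illustrative approximations, while asAggregationCommand() and its error behaviour come from the code above.

#include "mongo/base/status_with.h"
#include "mongo/db/query/query_request.h"

namespace mongo {
// Sketch: convert a QueryRequest into the equivalent aggregate command.
// Options that aggregation cannot express (min/max, tailable, ntoreturn, ...)
// surface as a non-OK Status, per the checks above.
StatusWith<BSONObj> toAggregation(const QueryRequest& qr) {
    auto swAgg = qr.asAggregationCommand();
    if (!swAgg.isOK()) {
        return swAgg.getStatus();
    }
    // For {filter: {a: 1}, sort: {b: -1}, limit: 5} the result resembles:
    //   {aggregate: "testcoll",
    //    pipeline: [{$match: {a: 1}}, {$sort: {b: -1}}, {$limit: 5}],
    //    cursor: {}, collation: {}}
    return swAgg;
}
}  // namespace mongo
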
diff --git a/src/mongo/db/query/query_request.h b/src/mongo/db/query/query_request.h
index 96b897ad683..37d12b0d04f 100644
--- a/src/mongo/db/query/query_request.h
+++ b/src/mongo/db/query/query_request.h
@@ -37,6 +37,7 @@
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/pipeline/legacy_runtime_constants_gen.h"
+#include "mongo/db/query/find_command_gen.h"
#include "mongo/db/query/tailable_mode.h"
namespace mongo {
@@ -52,44 +53,15 @@ class StatusWith;
*/
class QueryRequest {
public:
- // Find command field names.
- static constexpr auto kFilterField = "filter";
- static constexpr auto kProjectionField = "projection";
- static constexpr auto kSortField = "sort";
- static constexpr auto kHintField = "hint";
- static constexpr auto kCollationField = "collation";
- static constexpr auto kSkipField = "skip";
- static constexpr auto kLimitField = "limit";
- static constexpr auto kBatchSizeField = "batchSize";
- static constexpr auto kNToReturnField = "ntoreturn";
- static constexpr auto kSingleBatchField = "singleBatch";
- static constexpr auto kMaxField = "max";
- static constexpr auto kMinField = "min";
- static constexpr auto kReturnKeyField = "returnKey";
- static constexpr auto kShowRecordIdField = "showRecordId";
- static constexpr auto kTailableField = "tailable";
- static constexpr auto kOplogReplayField = "oplogReplay";
- static constexpr auto kNoCursorTimeoutField = "noCursorTimeout";
- static constexpr auto kAwaitDataField = "awaitData";
- static constexpr auto kPartialResultsField = "allowPartialResults";
- static constexpr auto kLegacyRuntimeConstantsField = "runtimeConstants";
- static constexpr auto kLetField = "let";
- static constexpr auto kTermField = "term";
- static constexpr auto kOptionsField = "options";
- static constexpr auto kReadOnceField = "readOnce";
- static constexpr auto kAllowSpeculativeMajorityReadField = "allowSpeculativeMajorityRead";
- static constexpr auto kRequestResumeTokenField = "$_requestResumeToken";
- static constexpr auto kResumeAfterField = "$_resumeAfter";
- static constexpr auto kUse44SortKeys = "_use44SortKeys";
static constexpr auto kMaxTimeMSOpOnlyField = "maxTimeMSOpOnly";
// Field names for sorting options.
static constexpr auto kNaturalSortField = "$natural";
- static constexpr auto kFindCommandName = "find";
static constexpr auto kShardVersionField = "shardVersion";
- explicit QueryRequest(NamespaceStringOrUUID nss);
+ explicit QueryRequest(NamespaceStringOrUUID nss, bool preferNssForSerialization = true);
+ explicit QueryRequest(FindCommand findCommand);
/**
* Returns a non-OK status if any property of the QR has a bad value (e.g. a negative skip
@@ -106,23 +78,29 @@ public:
* Returns a heap allocated QueryRequest on success or an error if 'cmdObj' is not well
* formed.
*/
- static StatusWith<std::unique_ptr<QueryRequest>> makeFromFindCommand(NamespaceString nss,
- const BSONObj& cmdObj,
- bool isExplain);
+ static std::unique_ptr<QueryRequest> makeFromFindCommand(
+ const BSONObj& cmdObj, bool isExplain, boost::optional<NamespaceString> nss = boost::none);
/**
* If _uuid exists for this QueryRequest, update the value of _nss.
*/
void refreshNSS(const NamespaceString& nss);
+ void setNSS(const NamespaceString& nss) {
+ auto& nssOrUuid = _findCommand.getNamespaceOrUUID();
+ nssOrUuid.setNss(nss);
+ }
+
/**
* Converts this QR into a find command.
     * The withUuid variants make a UUID-based find command instead of a namespace-based one.
*/
BSONObj asFindCommand() const;
- BSONObj asFindCommandWithUuid() const;
+
+ /**
+ * Common code for UUID and namespace-based find commands.
+ */
void asFindCommand(BSONObjBuilder* cmdBuilder) const;
- void asFindCommandWithUuid(BSONObjBuilder* cmdBuilder) const;
/**
* Converts this QR into an aggregation using $match. If this QR has options that cannot be
@@ -143,6 +121,8 @@ public:
*/
static StatusWith<int> parseMaxTimeMS(BSONElement maxTimeMSElt);
+ static int32_t parseMaxTimeMSForIDL(BSONElement maxTimeMSElt);
+
/**
* Helper function to identify text search sort key
* Example: {a: {$meta: "textScore"}}
@@ -180,120 +160,120 @@ public:
static constexpr auto kMaxTimeMSOpOnlyMaxPadding = 100LL;
const NamespaceString& nss() const {
- return _nss;
+ if (_findCommand.getNamespaceOrUUID().nss()) {
+ return *_findCommand.getNamespaceOrUUID().nss();
+ } else {
+ static NamespaceString nss = NamespaceString();
+ return nss;
+ }
}
boost::optional<UUID> uuid() const {
- return _uuid;
+ return _findCommand.getNamespaceOrUUID().uuid();
}
const BSONObj& getFilter() const {
- return _filter;
+ return _findCommand.getFilter();
}
void setFilter(BSONObj filter) {
- _filter = filter.getOwned();
+ _findCommand.setFilter(filter.getOwned());
}
const BSONObj& getProj() const {
- return _proj;
+ return _findCommand.getProjection();
}
void setProj(BSONObj proj) {
- _proj = proj.getOwned();
+ _findCommand.setProjection(proj.getOwned());
}
const BSONObj& getSort() const {
- return _sort;
+ return _findCommand.getSort();
}
void setSort(BSONObj sort) {
- _sort = sort.getOwned();
+ _findCommand.setSort(sort.getOwned());
}
const BSONObj& getHint() const {
- return _hint;
+ return _findCommand.getHint();
}
void setHint(BSONObj hint) {
- _hint = hint.getOwned();
+ _findCommand.setHint(hint.getOwned());
}
- const BSONObj& getReadConcern() const {
- if (_readConcern) {
- return *_readConcern;
- } else {
- static const auto empty = BSONObj();
- return empty;
- }
+ boost::optional<BSONObj> getReadConcern() const {
+ return _findCommand.getReadConcern();
}
void setReadConcern(BSONObj readConcern) {
- _readConcern = readConcern.getOwned();
+ _findCommand.setReadConcern(readConcern.getOwned());
}
const BSONObj& getCollation() const {
- return _collation;
+ return _findCommand.getCollation();
}
void setCollation(BSONObj collation) {
- _collation = collation.getOwned();
+ _findCommand.setCollation(collation.getOwned());
}
static constexpr auto kDefaultBatchSize = 101ll;
- boost::optional<long long> getSkip() const {
- return _skip;
+ boost::optional<std::int64_t> getSkip() const {
+ return _findCommand.getSkip();
}
- void setSkip(boost::optional<long long> skip) {
- _skip = skip;
+ void setSkip(boost::optional<std::int64_t> skip) {
+ _findCommand.setSkip(skip);
}
- boost::optional<long long> getLimit() const {
- return _limit;
+ boost::optional<std::int64_t> getLimit() const {
+ return _findCommand.getLimit();
}
- void setLimit(boost::optional<long long> limit) {
- _limit = limit;
+ void setLimit(boost::optional<std::int64_t> limit) {
+ _findCommand.setLimit(limit);
}
- boost::optional<long long> getBatchSize() const {
- return _batchSize;
+ boost::optional<std::int64_t> getBatchSize() const {
+ return _findCommand.getBatchSize();
}
- void setBatchSize(boost::optional<long long> batchSize) {
- _batchSize = batchSize;
+ void setBatchSize(boost::optional<std::int64_t> batchSize) {
+ _findCommand.setBatchSize(batchSize);
}
- boost::optional<long long> getNToReturn() const {
- return _ntoreturn;
+ boost::optional<std::int64_t> getNToReturn() const {
+ return _findCommand.getNtoreturn();
}
- void setNToReturn(boost::optional<long long> ntoreturn) {
- _ntoreturn = ntoreturn;
+ void setNToReturn(boost::optional<std::int64_t> ntoreturn) {
+ _findCommand.setNtoreturn(ntoreturn);
}
/**
* Returns batchSize or ntoreturn value if either is set. If neither is set,
* returns boost::none.
*/
- boost::optional<long long> getEffectiveBatchSize() const;
+ boost::optional<std::int64_t> getEffectiveBatchSize() const;
- bool wantMore() const {
- return _wantMore;
+ bool isSingleBatch() const {
+ return _findCommand.getSingleBatch();
}
- void setWantMore(bool wantMore) {
- _wantMore = wantMore;
+ void setSingleBatchField(bool singleBatch) {
+ _findCommand.setSingleBatch(singleBatch);
}
bool allowDiskUse() const {
- return _allowDiskUse;
+ return _findCommand.getAllowDiskUse();
}
void setAllowDiskUse(bool allowDiskUse) {
- _allowDiskUse = allowDiskUse;
+ _findCommand.setAllowDiskUse(allowDiskUse);
}
bool isExplain() const {
@@ -305,51 +285,51 @@ public:
}
const BSONObj& getUnwrappedReadPref() const {
- return _unwrappedReadPref;
+ return _findCommand.getUnwrappedReadPref();
}
void setUnwrappedReadPref(BSONObj unwrappedReadPref) {
- _unwrappedReadPref = unwrappedReadPref.getOwned();
+ _findCommand.setUnwrappedReadPref(unwrappedReadPref.getOwned());
}
int getMaxTimeMS() const {
- return _maxTimeMS;
+ return _findCommand.getMaxTimeMS() ? static_cast<int>(*_findCommand.getMaxTimeMS()) : 0;
}
void setMaxTimeMS(int maxTimeMS) {
- _maxTimeMS = maxTimeMS;
+ _findCommand.setMaxTimeMS(maxTimeMS);
}
const BSONObj& getMin() const {
- return _min;
+ return _findCommand.getMin();
}
void setMin(BSONObj min) {
- _min = min.getOwned();
+ _findCommand.setMin(min.getOwned());
}
const BSONObj& getMax() const {
- return _max;
+ return _findCommand.getMax();
}
void setMax(BSONObj max) {
- _max = max.getOwned();
+ _findCommand.setMax(max.getOwned());
}
bool returnKey() const {
- return _returnKey;
+ return _findCommand.getReturnKey();
}
void setReturnKey(bool returnKey) {
- _returnKey = returnKey;
+ _findCommand.setReturnKey(returnKey);
}
bool showRecordId() const {
- return _showRecordId;
+ return _findCommand.getShowRecordId();
}
void setShowRecordId(bool showRecordId) {
- _showRecordId = showRecordId;
+ _findCommand.setShowRecordId(showRecordId);
}
bool hasReadPref() const {
@@ -371,6 +351,12 @@ public:
void setTailableMode(TailableModeEnum tailableMode) {
_tailableMode = tailableMode;
+ if (_tailableMode == TailableModeEnum::kTailableAndAwaitData) {
+ _findCommand.setAwaitData(true);
+ _findCommand.setTailable(true);
+ } else if (_tailableMode == TailableModeEnum::kTailable) {
+ _findCommand.setTailable(true);
+ }
}
TailableModeEnum getTailableMode() const {
@@ -378,19 +364,27 @@ public:
}
void setLegacyRuntimeConstants(LegacyRuntimeConstants runtimeConstants) {
- _legacyRuntimeConstants = std::move(runtimeConstants);
+ _findCommand.setLegacyRuntimeConstants(std::move(runtimeConstants));
}
const boost::optional<LegacyRuntimeConstants>& getLegacyRuntimeConstants() const {
- return _legacyRuntimeConstants;
+ return _findCommand.getLegacyRuntimeConstants();
+ }
+
+ bool getTailable() const {
+ return _findCommand.getTailable();
+ }
+
+ bool getAwaitData() const {
+ return _findCommand.getAwaitData();
}
void setLetParameters(BSONObj letParams) {
- _letParameters = std::move(letParams);
+ _findCommand.setLet(std::move(letParams));
}
const boost::optional<BSONObj>& getLetParameters() const {
- return _letParameters;
+ return _findCommand.getLet();
}
bool isSlaveOk() const {
@@ -402,11 +396,11 @@ public:
}
bool isNoCursorTimeout() const {
- return _noCursorTimeout;
+ return _findCommand.getNoCursorTimeout();
}
void setNoCursorTimeout(bool noCursorTimeout) {
- _noCursorTimeout = noCursorTimeout;
+ _findCommand.setNoCursorTimeout(noCursorTimeout);
}
bool isExhaust() const {
@@ -418,51 +412,51 @@ public:
}
bool isAllowPartialResults() const {
- return _allowPartialResults;
+ return _findCommand.getAllowPartialResults();
}
void setAllowPartialResults(bool allowPartialResults) {
- _allowPartialResults = allowPartialResults;
+ _findCommand.setAllowPartialResults(allowPartialResults);
}
- boost::optional<long long> getReplicationTerm() const {
- return _replicationTerm;
+ boost::optional<std::int64_t> getReplicationTerm() const {
+ return _findCommand.getTerm();
}
- void setReplicationTerm(boost::optional<long long> replicationTerm) {
- _replicationTerm = replicationTerm;
+ void setReplicationTerm(boost::optional<std::int64_t> replicationTerm) {
+ _findCommand.setTerm(replicationTerm);
}
bool isReadOnce() const {
- return _readOnce;
+ return _findCommand.getReadOnce();
}
void setReadOnce(bool readOnce) {
- _readOnce = readOnce;
+ _findCommand.setReadOnce(readOnce);
}
void setAllowSpeculativeMajorityRead(bool allowSpeculativeMajorityRead) {
- _allowSpeculativeMajorityRead = allowSpeculativeMajorityRead;
+ _findCommand.setAllowSpeculativeMajorityRead(allowSpeculativeMajorityRead);
}
bool allowSpeculativeMajorityRead() const {
- return _allowSpeculativeMajorityRead;
+ return _findCommand.getAllowSpeculativeMajorityRead();
}
bool getRequestResumeToken() const {
- return _requestResumeToken;
+ return _findCommand.getRequestResumeToken();
}
void setRequestResumeToken(bool requestResumeToken) {
- _requestResumeToken = requestResumeToken;
+ _findCommand.setRequestResumeToken(requestResumeToken);
}
const BSONObj& getResumeAfter() const {
- return _resumeAfter;
+ return _findCommand.getResumeAfter();
}
void setResumeAfter(BSONObj resumeAfter) {
- _resumeAfter = resumeAfter;
+ _findCommand.setResumeAfter(resumeAfter.getOwned());
}
/**
@@ -519,88 +513,19 @@ private:
*/
void addMetaProjection();
- /**
- * Common code for UUID and namespace-based find commands.
- */
- void asFindCommandInternal(BSONObjBuilder* cmdBuilder) const;
-
- NamespaceString _nss;
- OptionalCollectionUUID _uuid;
-
- BSONObj _filter;
- BSONObj _proj;
- BSONObj _sort;
- // The hint provided, if any. If the hint was by index key pattern, the value of '_hint' is
- // the key pattern hinted. If the hint was by index name, the value of '_hint' is
- // {$hint: <String>}, where <String> is the index name hinted.
- BSONObj _hint;
- // The read concern is parsed elsewhere.
- boost::optional<BSONObj> _readConcern;
- // The collation is parsed elsewhere.
- BSONObj _collation;
-
- // The unwrapped readPreference object, if one was given to us by the mongos command processor.
- // This object will be empty when no readPreference is specified or if the request does not
- // originate from mongos.
- BSONObj _unwrappedReadPref;
-
- // If true, each cursor response will include a 'postBatchResumeToken' field containing the
- // RecordID of the last observed document.
- bool _requestResumeToken = false;
- // If non-empty, instructs the query to resume from the RecordId given by the object's $recordId
- // field.
- BSONObj _resumeAfter;
-
- bool _wantMore = true;
-
- // Must be either unset or positive. Negative skip is illegal and a skip of zero received from
- // the client is interpreted as the absence of a skip value.
- boost::optional<long long> _skip;
-
- // Must be either unset or positive. Negative limit is illegal and a limit value of zero
- // received from the client is interpreted as the absence of a limit value.
- boost::optional<long long> _limit;
-
- // Must be either unset or non-negative. Negative batchSize is illegal but batchSize of 0 is
- // allowed.
- boost::optional<long long> _batchSize;
-
- bool _allowDiskUse = false;
-
- // Set only when parsed from an OP_QUERY find message. The value is computed by driver or shell
- // and is set to be a min of batchSize and limit provided by user. QR can have set either
- // ntoreturn or batchSize / limit.
- boost::optional<long long> _ntoreturn;
+    // TODO SERVER-53060: This additional nesting can be avoided if we move the fields below
+    // (_explain, _tailableMode, etc.) into the CanonicalQuery class.
+ FindCommand _findCommand;
bool _explain = false;
- // A user-specified maxTimeMS limit, or a value of '0' if not specified.
- int _maxTimeMS = 0;
-
- BSONObj _min;
- BSONObj _max;
-
- bool _returnKey = false;
- bool _showRecordId = false;
- bool _hasReadPref = false;
-
- // Runtime constants which may be referenced by $expr, if present.
- boost::optional<LegacyRuntimeConstants> _legacyRuntimeConstants;
-
- // A document containing user-specified constants. For a find query, these are accessed only
- // inside $expr.
- boost::optional<BSONObj> _letParameters;
-
// Options that can be specified in the OP_QUERY 'flags' header.
TailableModeEnum _tailableMode = TailableModeEnum::kNormal;
bool _slaveOk = false;
- bool _noCursorTimeout = false;
bool _exhaust = false;
- bool _allowPartialResults = false;
- bool _readOnce = false;
- bool _allowSpeculativeMajorityRead = false;
- boost::optional<long long> _replicationTerm;
+ // Parameters used only by the legacy query request.
+ bool _hasReadPref = false;
};
} // namespace mongo
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 49692f72f5c..4a042669352 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -33,6 +33,7 @@
#include <boost/optional.hpp>
#include <boost/optional/optional_io.hpp>
+#include "mongo/base/error_codes.h"
#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_mock.h"
#include "mongo/db/dbmessage.h"
@@ -53,7 +54,7 @@ static const NamespaceString testns("testdb.testcoll");
TEST(QueryRequestTest, LimitWithNToReturn) {
QueryRequest qr(testns);
- qr.setLimit(0);
+ qr.setLimit(1);
qr.setNToReturn(0);
ASSERT_NOT_OK(qr.validate());
}
@@ -194,35 +195,34 @@ TEST(QueryRequestTest, ForbidTailableWithNonNaturalSort) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"tailable: true,"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "sort: {a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
TEST(QueryRequestTest, ForbidTailableWithSingleBatch) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"tailable: true,"
- "singleBatch: true}");
- const NamespaceString nss("test.testns");
+ "singleBatch: true, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
TEST(QueryRequestTest, AllowTailableWithNaturalSort) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"tailable: true,"
- "sort: {$natural: 1}}");
- const NamespaceString nss("test.testns");
+ "sort: {$natural: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
- ASSERT_TRUE(result.getValue()->isTailable());
- ASSERT_BSONOBJ_EQ(result.getValue()->getSort(), BSON("$natural" << 1));
+ auto qr = QueryRequest::makeFromFindCommand(cmdObj, isExplain);
+ ASSERT_TRUE(qr->isTailable());
+ ASSERT_BSONOBJ_EQ(qr->getSort(), BSON("$natural" << 1));
}
//
@@ -380,11 +380,10 @@ TEST(QueryRequestTest, ParseFromCommandBasic) {
"{find: 'testns',"
"filter: {a: 3},"
"sort: {a: 1},"
- "projection: {_id: 0, a: 1}}");
- const NamespaceString nss("test.testns");
+ "projection: {_id: 0, a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain);
}
TEST(QueryRequestTest, ParseFromCommandWithOptions) {
@@ -393,11 +392,10 @@ TEST(QueryRequestTest, ParseFromCommandWithOptions) {
"filter: {a: 3},"
"sort: {a: 1},"
"projection: {_id: 0, a: 1},"
- "showRecordId: true}}");
- const NamespaceString nss("test.testns");
+ "showRecordId: true, '$db': 'test'}");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
// Make sure the values from the command BSON are reflected in the QR.
ASSERT(qr->showRecordId());
@@ -407,11 +405,10 @@ TEST(QueryRequestTest, ParseFromCommandHintAsString) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "hint: 'foo_1'}");
- const NamespaceString nss("test.testns");
+ "hint: 'foo_1', '$db': 'test'}");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
BSONObj hintObj = qr->getHint();
ASSERT_BSONOBJ_EQ(BSON("$hint"
@@ -423,20 +420,20 @@ TEST(QueryRequestTest, ParseFromCommandValidSortProj) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"projection: {a: 1},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "sort: {a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- ASSERT_OK(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain);
}
TEST(QueryRequestTest, ParseFromCommandValidSortProjMeta) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"projection: {a: {$meta: 'textScore'}},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
+ "sort: {a: {$meta: 'textScore'}}, '$db': 'test'}");
+
bool isExplain = false;
- ASSERT_OK(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain);
}
TEST(QueryRequestTest, ParseFromCommandAllFlagsTrue) {
@@ -447,11 +444,10 @@ TEST(QueryRequestTest, ParseFromCommandAllFlagsTrue) {
"awaitData: true,"
"allowPartialResults: true,"
"readOnce: true,"
- "allowSpeculativeMajorityRead: true}");
- const NamespaceString nss("test.testns");
+ "allowSpeculativeMajorityRead: true, '$db': 'test'}");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
// Test that all the flags got set to true.
ASSERT(qr->isTailable());
@@ -466,14 +462,14 @@ TEST(QueryRequestTest, ParseFromCommandAllFlagsTrue) {
TEST(QueryRequestTest, OplogReplayFlagIsAllowedButIgnored) {
auto cmdObj = BSON("find"
<< "testns"
- << "oplogReplay" << true << "tailable" << true);
+ << "oplogReplay" << true << "tailable" << true << "$db"
+ << "test");
const bool isExplain = false;
const NamespaceString nss{"test.testns"};
- auto qr = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(qr.getStatus());
+ auto qr = QueryRequest::makeFromFindCommand(cmdObj, isExplain);
// Verify that the 'oplogReplay' flag does not appear if we reserialize the request.
- auto reserialized = qr.getValue()->asFindCommand();
+ auto reserialized = qr->asFindCommand();
ASSERT_BSONOBJ_EQ(reserialized,
BSON("find"
<< "testns"
@@ -481,11 +477,10 @@ TEST(QueryRequestTest, OplogReplayFlagIsAllowedButIgnored) {
}
TEST(QueryRequestTest, ParseFromCommandReadOnceDefaultsToFalse) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', '$db': 'test'}");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT(!qr->isReadOnce());
}
@@ -494,12 +489,10 @@ TEST(QueryRequestTest, ParseFromCommandValidMinMax) {
"{find: 'testns',"
"comment: 'the comment',"
"min: {a: 1},"
- "max: {a: 2}}");
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ "max: {a: 2}, '$db': 'test'}");
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
BSONObj expectedMin = BSON("a" << 1);
ASSERT_EQUALS(0, expectedMin.woCompare(qr->getMin()));
BSONObj expectedMax = BSON("a" << 2);
@@ -521,13 +514,11 @@ TEST(QueryRequestTest, ParseFromCommandAllNonOptionFields) {
"limit: 3,"
"skip: 5,"
"batchSize: 90,"
- "singleBatch: false}")
+ "singleBatch: false, '$db': 'test'}")
.addField(rtcObj["runtimeConstants"]);
- const NamespaceString nss("test.testns");
- bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ bool isExplain = false;
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
// Check the values inside the QR.
BSONObj expectedQuery = BSON("a" << 1);
ASSERT_EQUALS(0, expectedQuery.woCompare(qr->getFilter()));
@@ -538,7 +529,8 @@ TEST(QueryRequestTest, ParseFromCommandAllNonOptionFields) {
BSONObj expectedHint = BSON("d" << 1);
ASSERT_EQUALS(0, expectedHint.woCompare(qr->getHint()));
BSONObj expectedReadConcern = BSON("e" << 1);
- ASSERT_EQUALS(0, expectedReadConcern.woCompare(qr->getReadConcern()));
+ ASSERT(qr->getReadConcern());
+ ASSERT_BSONOBJ_EQ(expectedReadConcern, *qr->getReadConcern());
BSONObj expectedUnwrappedReadPref = BSON("$readPreference"
<< "secondary");
ASSERT_EQUALS(0, expectedUnwrappedReadPref.woCompare(qr->getUnwrappedReadPref()));
@@ -550,18 +542,17 @@ TEST(QueryRequestTest, ParseFromCommandAllNonOptionFields) {
ASSERT(qr->getLegacyRuntimeConstants().has_value());
ASSERT_EQUALS(qr->getLegacyRuntimeConstants()->getLocalNow(), rtc.getLocalNow());
ASSERT_EQUALS(qr->getLegacyRuntimeConstants()->getClusterTime(), rtc.getClusterTime());
- ASSERT(qr->wantMore());
+ ASSERT(!qr->isSingleBatch());
}
TEST(QueryRequestTest, ParseFromCommandLargeLimit) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "limit: 8000000000}"); // 8 * 1000 * 1000 * 1000
- const NamespaceString nss("test.testns");
+ "limit: 8000000000, '$db': 'test'}"); // 8 * 1000 * 1000 * 1000
+
const bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *qr->getLimit());
}
@@ -570,11 +561,10 @@ TEST(QueryRequestTest, ParseFromCommandLargeBatchSize) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "batchSize: 8000000000}"); // 8 * 1000 * 1000 * 1000
- const NamespaceString nss("test.testns");
+ "batchSize: 8000000000, '$db': 'test'}"); // 8 * 1000 * 1000 * 1000
+
const bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *qr->getBatchSize());
}
@@ -583,11 +573,10 @@ TEST(QueryRequestTest, ParseFromCommandLargeSkip) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "skip: 8000000000}"); // 8 * 1000 * 1000 * 1000
- const NamespaceString nss("test.testns");
+ "skip: 8000000000, '$db': 'test'}"); // 8 * 1000 * 1000 * 1000
+
const bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT_EQUALS(8LL * 1000 * 1000 * 1000, *qr->getSkip());
}
@@ -599,45 +588,51 @@ TEST(QueryRequestTest, ParseFromCommandLargeSkip) {
TEST(QueryRequestTest, ParseFromCommandQueryWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
- "filter: 3}");
- const NamespaceString nss("test.testns");
+ "filter: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandSortWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "sort: 3}");
- const NamespaceString nss("test.testns");
+ "sort: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandProjWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "projection: 'foo'}");
- const NamespaceString nss("test.testns");
+ "projection: 'foo', '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandSkipWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
"skip: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "projection: {a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
@@ -646,134 +641,147 @@ TEST(QueryRequestTest, ParseFromCommandLimitWrongType) {
"{find: 'testns',"
"filter: {a: 1},"
"limit: '5',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "projection: {a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandSingleBatchWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
"singleBatch: 'false',"
- "projection: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "projection: {a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandUnwrappedReadPrefWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "$queryOptions: 1}");
- const NamespaceString nss("test.testns");
+ "$queryOptions: 1, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandMaxTimeMSWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "maxTimeMS: true}");
- const NamespaceString nss("test.testns");
+ "maxTimeMS: true, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
+
TEST(QueryRequestTest, ParseFromCommandMaxWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "max: 3}");
- const NamespaceString nss("test.testns");
+ "max: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandMinWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "min: 3}");
- const NamespaceString nss("test.testns");
+ "min: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandReturnKeyWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "returnKey: 3}");
- const NamespaceString nss("test.testns");
+ "returnKey: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
-
TEST(QueryRequestTest, ParseFromCommandShowRecordIdWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "showRecordId: 3}");
- const NamespaceString nss("test.testns");
+ "showRecordId: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandTailableWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "tailable: 3}");
- const NamespaceString nss("test.testns");
+ "tailable: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandSlaveOkWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "slaveOk: 3}");
- const NamespaceString nss("test.testns");
+ "slaveOk: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, 40415);
}
TEST(QueryRequestTest, ParseFromCommandOplogReplayWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "oplogReplay: 3}");
- const NamespaceString nss("test.testns");
+ "oplogReplay: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandNoCursorTimeoutWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "noCursorTimeout: 3}");
- const NamespaceString nss("test.testns");
+ "noCursorTimeout: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandAwaitDataWrongType) {
@@ -781,76 +789,85 @@ TEST(QueryRequestTest, ParseFromCommandAwaitDataWrongType) {
"{find: 'testns',"
"filter: {a: 1},"
"tailable: true,"
- "awaitData: 3}");
- const NamespaceString nss("test.testns");
+ "awaitData: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
+
TEST(QueryRequestTest, ParseFromCommandExhaustWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "exhaust: 3}");
- const NamespaceString nss("test.testns");
+ "exhaust: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, 40415);
}
+
TEST(QueryRequestTest, ParseFromCommandPartialWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "allowPartialResults: 3}");
- const NamespaceString nss("test.testns");
+ "allowPartialResults: 3, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandReadConcernWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "readConcern: 'foo'}");
- const NamespaceString nss("test.testns");
+ "readConcern: 'foo', '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandCollationWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
- "collation: 'foo'}");
- const NamespaceString nss("test.testns");
+ "collation: 'foo', '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandReadOnceWrongType) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
- "readOnce: 1}");
- const NamespaceString nss("test.testns");
+ "readOnce: 1, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_EQ(ErrorCodes::FailedToParse, result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandLegacyRuntimeConstantsWrongType) {
BSONObj cmdObj = BSON("find"
<< "testns"
<< "runtimeConstants"
- << "shouldNotBeString");
- const NamespaceString nss("test.testns");
+ << "shouldNotBeString"
+ << "$db"
+ << "test");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_EQ(ErrorCodes::FailedToParse, result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::TypeMismatch);
}
TEST(QueryRequestTest, ParseFromCommandLegacyRuntimeConstantsSubfieldsWrongType) {
@@ -860,10 +877,11 @@ TEST(QueryRequestTest, ParseFromCommandLegacyRuntimeConstantsSubfieldsWrongType)
<< BSON("localNow"
<< "shouldBeDate"
<< "clusterTime"
- << "shouldBeTimestamp"));
- const NamespaceString nss("test.testns");
+ << "shouldBeTimestamp")
+ << "$db"
+ << "test");
bool isExplain = false;
- ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain),
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
AssertionException,
ErrorCodes::TypeMismatch);
}
@@ -876,22 +894,19 @@ TEST(QueryRequestTest, ParseFromCommandNegativeSkipError) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"skip: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
+ "filter: {a: 3}, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
TEST(QueryRequestTest, ParseFromCommandSkipIsZero) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"skip: 0,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
+ "filter: {a: 3}, '$db': 'test'}");
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT_BSONOBJ_EQ(BSON("a" << 3), qr->getFilter());
ASSERT_FALSE(qr->getSkip());
}
@@ -900,22 +915,19 @@ TEST(QueryRequestTest, ParseFromCommandNegativeLimitError) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"limit: -3,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
+ "filter: {a: 3}, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
TEST(QueryRequestTest, ParseFromCommandLimitIsZero) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"limit: 0,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
+ "filter: {a: 3}, '$db': 'test'}");
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT_BSONOBJ_EQ(BSON("a" << 3), qr->getFilter());
ASSERT_FALSE(qr->getLimit());
}
@@ -924,32 +936,26 @@ TEST(QueryRequestTest, ParseFromCommandNegativeBatchSizeError) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"batchSize: -10,"
- "filter: {a: 3}}");
- const NamespaceString nss("test.testns");
+ "filter: {a: 3}, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
TEST(QueryRequestTest, ParseFromCommandBatchSizeZero) {
- BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', batchSize: 0, '$db': 'test'}");
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT(qr->getBatchSize());
ASSERT_EQ(0, *qr->getBatchSize());
-
ASSERT(!qr->getLimit());
}
TEST(QueryRequestTest, ParseFromCommandDefaultBatchSize) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', '$db': 'test'}");
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT(!qr->getBatchSize());
ASSERT(!qr->getLimit());
@@ -959,38 +965,39 @@ TEST(QueryRequestTest, ParseFromCommandRequestResumeToken) {
BSONObj cmdObj = BSON("find"
<< "testns"
<< "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
- << "$_requestResumeToken" << true);
- const NamespaceString nss("test.testns");
+ << "$_requestResumeToken" << true << "$db"
+ << "test");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT(qr->getRequestResumeToken());
}
TEST(QueryRequestTest, ParseFromCommandResumeToken) {
- BSONObj cmdObj =
- BSON("find"
- << "testns"
- << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
- << "$_requestResumeToken" << true << "$_resumeAfter" << BSON("$recordId" << 1LL));
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = BSON("find"
+ << "testns"
+ << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
+ << "$_requestResumeToken" << true << "$_resumeAfter"
+ << BSON("$recordId" << 1LL) << "$db"
+ << "test");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT(!qr->getResumeAfter().isEmpty());
ASSERT(qr->getRequestResumeToken());
}
TEST(QueryRequestTest, ParseFromCommandEmptyResumeToken) {
BSONObj resumeAfter = fromjson("{}");
- BSONObj cmdObj = BSON("find"
- << "testns"
- << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
- << "$_requestResumeToken" << true << "$_resumeAfter" << resumeAfter);
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj =
+ BSON("find"
+ << "testns"
+ << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
+ << "$_requestResumeToken" << true << "$_resumeAfter" << resumeAfter << "$db"
+ << "test");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT(qr->getRequestResumeToken());
ASSERT(qr->getResumeAfter().isEmpty());
}
@@ -1000,31 +1007,30 @@ TEST(QueryRequestTest, ParseFromCommandEmptyResumeToken) {
//
TEST(QueryRequestTest, AsFindCommandAllNonOptionFields) {
- BSONObj rtcObj = BSON("runtimeConstants"
- << (LegacyRuntimeConstants{Date_t::now(), Timestamp(1, 1)}.toBSON()));
+ BSONObj storage = BSON("runtimeConstants"
+ << (LegacyRuntimeConstants{Date_t::now(), Timestamp(1, 1)}.toBSON()));
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"filter: {a: 1},"
"projection: {c: 1},"
"sort: {b: 1},"
"hint: {d: 1},"
- "readConcern: {e: 1},"
"collation: {f: 1},"
"skip: 5,"
"limit: 3,"
"batchSize: 90,"
- "singleBatch: true}")
- .addField(rtcObj["runtimeConstants"]);
- const NamespaceString nss("test.testns");
+ "singleBatch: true, "
+ "readConcern: {e: 1}, '$db': 'test'}")
+ .addField(storage["runtimeConstants"]);
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_BSONOBJ_EQ(cmdObj, qr->asFindCommand());
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
+ ASSERT_BSONOBJ_EQ(cmdObj.removeField("$db"), qr->asFindCommand());
}
TEST(QueryRequestTest, AsFindCommandWithUuidAllNonOptionFields) {
- BSONObj rtcObj = BSON("runtimeConstants"
- << (LegacyRuntimeConstants{Date_t::now(), Timestamp(1, 1)}.toBSON()));
+ BSONObj storage = BSON("runtimeConstants"
+ << (LegacyRuntimeConstants{Date_t::now(), Timestamp(1, 1)}.toBSON()));
BSONObj cmdObj =
fromjson(
// This binary value is UUID("01234567-89ab-cdef-edcb-a98765432101")
@@ -1033,18 +1039,17 @@ TEST(QueryRequestTest, AsFindCommandWithUuidAllNonOptionFields) {
"projection: {c: 1},"
"sort: {b: 1},"
"hint: {d: 1},"
- "readConcern: {e: 1},"
"collation: {f: 1},"
"skip: 5,"
"limit: 3,"
"batchSize: 90,"
- "singleBatch: true}")
- .addField(rtcObj["runtimeConstants"]);
- const NamespaceString nss("test.testns");
+ "singleBatch: true,"
+ "readConcern: {e: 1}, '$db': 'test'}")
+ .addField(storage["runtimeConstants"]);
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_BSONOBJ_EQ(cmdObj, qr->asFindCommandWithUuid());
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
+ ASSERT_BSONOBJ_EQ(cmdObj.removeField("$db"), qr->asFindCommand());
}
TEST(QueryRequestTest, AsFindCommandWithUuidNoAvailableNamespace) {
@@ -1052,33 +1057,33 @@ TEST(QueryRequestTest, AsFindCommandWithUuidNoAvailableNamespace) {
fromjson("{find: { \"$binary\" : \"ASNFZ4mrze/ty6mHZUMhAQ==\", \"$type\" : \"04\" }}");
QueryRequest qr(NamespaceStringOrUUID(
"test", UUID::parse("01234567-89ab-cdef-edcb-a98765432101").getValue()));
- ASSERT_BSONOBJ_EQ(cmdObj, qr.asFindCommandWithUuid());
+ ASSERT_BSONOBJ_EQ(cmdObj.removeField("$db"), qr.asFindCommand());
}
TEST(QueryRequestTest, AsFindCommandWithResumeToken) {
- BSONObj cmdObj =
- BSON("find"
- << "testns"
- << "sort" << BSON("$natural" << 1) << "hint" << BSON("$natural" << 1)
- << "$_requestResumeToken" << true << "$_resumeAfter" << BSON("$recordId" << 1LL));
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = BSON("find"
+ << "testns"
+ << "sort" << BSON("$natural" << 1) << "hint" << BSON("$natural" << 1)
+ << "$_requestResumeToken" << true << "$_resumeAfter"
+ << BSON("$recordId" << 1LL) << "$db"
+ << "test");
+
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT_BSONOBJ_EQ(cmdObj, qr->asFindCommand());
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
+ ASSERT_BSONOBJ_EQ(cmdObj.removeField("$db"), qr->asFindCommand());
}
TEST(QueryRequestTest, AsFindCommandWithEmptyResumeToken) {
BSONObj resumeAfter = fromjson("{}");
- BSONObj cmdObj = BSON("find"
- << "testns"
- << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
- << "$_requestResumeToken" << true << "$_resumeAfter" << resumeAfter);
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj =
+ BSON("find"
+ << "testns"
+ << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1)
+ << "$_requestResumeToken" << true << "$_resumeAfter" << resumeAfter << "$db"
+ << "test");
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
- ASSERT(qr->asFindCommand().getField("$_resumeAfter").eoo());
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
+    ASSERT(qr->asFindCommand().getField("$_resumeAfter").eoo());
}
//
@@ -1090,11 +1095,9 @@ TEST(QueryRequestTest, ParseFromCommandMinMaxDifferentFieldsError) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
"min: {a: 3},"
- "max: {b: 4}}");
- const NamespaceString nss("test.testns");
+ "max: {b: 4}, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, 51176);
}
TEST(QueryRequestTest, ParseCommandAllowNonMetaSortOnFieldWithMetaProject) {
@@ -1103,17 +1106,15 @@ TEST(QueryRequestTest, ParseCommandAllowNonMetaSortOnFieldWithMetaProject) {
cmdObj = fromjson(
"{find: 'testns',"
"projection: {a: {$meta: 'textScore'}},"
- "sort: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "sort: {a: 1}, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain);
cmdObj = fromjson(
"{find: 'testns',"
"projection: {a: {$meta: 'textScore'}},"
- "sort: {b: 1}}");
- ASSERT_OK(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain).getStatus());
+ "sort: {b: 1}, '$db': 'test'}");
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain);
}
TEST(QueryRequestTest, ParseCommandAllowMetaSortOnFieldWithoutMetaProject) {
@@ -1122,73 +1123,63 @@ TEST(QueryRequestTest, ParseCommandAllowMetaSortOnFieldWithoutMetaProject) {
cmdObj = fromjson(
"{find: 'testns',"
"projection: {a: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- const NamespaceString nss("test.testns");
+ "sort: {a: {$meta: 'textScore'}}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
+ auto qr = QueryRequest::makeFromFindCommand(cmdObj, isExplain);
cmdObj = fromjson(
"{find: 'testns',"
"projection: {b: 1},"
- "sort: {a: {$meta: 'textScore'}}}");
- result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
+ "sort: {a: {$meta: 'textScore'}}, '$db': 'test'}");
+ qr = QueryRequest::makeFromFindCommand(cmdObj, isExplain);
}
TEST(QueryRequestTest, ParseCommandForbidExhaust) {
- BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', exhaust: true, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, 40415);
}
TEST(QueryRequestTest, ParseCommandIsFromFindCommand) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', '$db': 'test'}");
bool isExplain = false;
- unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain)));
+ unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, isExplain));
ASSERT_FALSE(qr->getNToReturn());
}
TEST(QueryRequestTest, ParseCommandAwaitDataButNotTailable) {
- const NamespaceString nss("test.testns");
- BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true}");
+ BSONObj cmdObj = fromjson("{find: 'testns', awaitData: true, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain),
+ DBException,
+ ErrorCodes::FailedToParse);
}
TEST(QueryRequestTest, ParseCommandFirstFieldNotString) {
- BSONObj cmdObj = fromjson("{find: 1}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 1, '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, ErrorCodes::BadValue);
}
TEST(QueryRequestTest, ParseCommandIgnoreShardVersionField) {
- BSONObj cmdObj = fromjson("{find: 'test.testns', shardVersion: 'foo'}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'test.testns', shardVersion: 'foo', '$db': 'test'}");
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
+ QueryRequest::makeFromFindCommand(cmdObj, isExplain);
}
TEST(QueryRequestTest, DefaultQueryParametersCorrect) {
- BSONObj cmdObj = fromjson("{find: 'testns'}");
+ BSONObj cmdObj = fromjson("{find: 'testns', '$db': 'test'}");
- const NamespaceString nss("test.testns");
- std::unique_ptr<QueryRequest> qr(
- assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, false)));
+ std::unique_ptr<QueryRequest> qr(QueryRequest::makeFromFindCommand(cmdObj, false));
ASSERT_FALSE(qr->getSkip());
ASSERT_FALSE(qr->getLimit());
- ASSERT_EQUALS(true, qr->wantMore());
+ ASSERT_FALSE(qr->isSingleBatch());
ASSERT_FALSE(qr->getNToReturn());
ASSERT_EQUALS(false, qr->isExplain());
ASSERT_EQUALS(0, qr->getMaxTimeMS());
@@ -1206,23 +1197,21 @@ TEST(QueryRequestTest, DefaultQueryParametersCorrect) {
}
TEST(QueryRequestTest, ParseCommandAllowDiskUseTrue) {
- BSONObj cmdObj = fromjson("{find: 'testns', allowDiskUse: true}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', allowDiskUse: true, '$db': 'test'}");
+
const bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ auto result = QueryRequest::makeFromFindCommand(cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
- ASSERT_EQ(true, result.getValue()->allowDiskUse());
+ ASSERT_EQ(true, result->allowDiskUse());
}
TEST(QueryRequestTest, ParseCommandAllowDiskUseFalse) {
- BSONObj cmdObj = fromjson("{find: 'testns', allowDiskUse: false}");
- const NamespaceString nss("test.testns");
+ BSONObj cmdObj = fromjson("{find: 'testns', allowDiskUse: false, '$db': 'test'}");
+
const bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
+ auto result = QueryRequest::makeFromFindCommand(cmdObj, isExplain);
- ASSERT_OK(result.getStatus());
- ASSERT_EQ(false, result.getValue()->allowDiskUse());
+ ASSERT_EQ(false, result->allowDiskUse());
}
//
@@ -1232,21 +1221,19 @@ TEST(QueryRequestTest, ParseCommandAllowDiskUseFalse) {
TEST(QueryRequestTest, ParseFromCommandForbidExtraField) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
- "foo: {a: 1}}");
- const NamespaceString nss("test.testns");
+ "foo: {a: 1}, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, 40415);
}
TEST(QueryRequestTest, ParseFromCommandForbidExtraOption) {
BSONObj cmdObj = fromjson(
"{find: 'testns',"
- "foo: true}");
- const NamespaceString nss("test.testns");
+ "foo: true, '$db': 'test'}");
+
bool isExplain = false;
- auto result = QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain);
- ASSERT_NOT_OK(result.getStatus());
+ ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(cmdObj, isExplain), DBException, 40415);
}
TEST(QueryRequestTest, ParseMaxTimeMSStringValueFails) {
@@ -1337,22 +1324,22 @@ TEST(QueryRequestTest, ConvertToAggregationWithMaxFails) {
ASSERT_NOT_OK(qr.asAggregationCommand());
}
-TEST(QueryRequestTest, ConvertToAggregationWithNoWantMoreFails) {
+TEST(QueryRequestTest, ConvertToAggregationWithSingleBatchFieldFails) {
QueryRequest qr(testns);
- qr.setWantMore(false);
+ qr.setSingleBatchField(true);
ASSERT_NOT_OK(qr.asAggregationCommand());
}
-TEST(QueryRequestTest, ConvertToAggregationWithNoWantMoreAndLimitFails) {
+TEST(QueryRequestTest, ConvertToAggregationWithSingleBatchFieldAndLimitFails) {
QueryRequest qr(testns);
- qr.setWantMore(false);
+ qr.setSingleBatchField(true);
qr.setLimit(7);
ASSERT_NOT_OK(qr.asAggregationCommand());
}
-TEST(QueryRequestTest, ConvertToAggregationWithNoWantMoreLimitOneSucceeds) {
+TEST(QueryRequestTest, ConvertToAggregationWithSingleBatchFieldLimitOneSucceeds) {
QueryRequest qr(testns);
- qr.setWantMore(false);
+ qr.setSingleBatchField(true);
qr.setLimit(1);
ASSERT_OK(qr.asAggregationCommand());
}
@@ -1561,13 +1548,13 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) {
qr.setAllowDiskUse(false);
const auto findCmd = qr.asFindCommand();
- ASSERT_FALSE(findCmd[QueryRequest::kAllowDiskUseField]);
+ ASSERT_FALSE(findCmd[QueryRequest::kAllowDiskUseField].booleanSafe());
}
TEST(QueryRequestTest, ParseFromLegacyQuery) {
const auto kSkip = 1;
const auto kNToReturn = 2;
-
+ const NamespaceString nss("test.testns");
BSONObj queryObj = fromjson(R"({
query: {query: 1},
orderby: {sort: 1},
@@ -1576,9 +1563,10 @@ TEST(QueryRequestTest, ParseFromLegacyQuery) {
$min: {x: 'min'},
$max: {x: 'max'}
})");
- const NamespaceString nss("test.testns");
- unique_ptr<QueryRequest> qr(assertGet(QueryRequest::fromLegacyQuery(
- nss, queryObj, BSON("proj" << 1), kSkip, kNToReturn, QueryOption_Exhaust)));
+ unique_ptr<QueryRequest> qr(
+ std::move(QueryRequest::fromLegacyQuery(
+ nss, queryObj, BSON("proj" << 1), kSkip, kNToReturn, QueryOption_Exhaust)
+ .getValue()));
ASSERT_EQ(qr->nss(), nss);
ASSERT_BSONOBJ_EQ(qr->getFilter(), fromjson("{query: 1}"));
@@ -1587,9 +1575,9 @@ TEST(QueryRequestTest, ParseFromLegacyQuery) {
ASSERT_BSONOBJ_EQ(qr->getHint(), fromjson("{hint: 1}"));
ASSERT_BSONOBJ_EQ(qr->getMin(), fromjson("{x: 'min'}"));
ASSERT_BSONOBJ_EQ(qr->getMax(), fromjson("{x: 'max'}"));
- ASSERT_EQ(qr->getSkip(), boost::optional<long long>(kSkip));
- ASSERT_EQ(qr->getNToReturn(), boost::optional<long long>(kNToReturn));
- ASSERT_EQ(qr->wantMore(), true);
+ ASSERT_EQ(qr->getSkip(), boost::optional<int64_t>(kSkip));
+ ASSERT_EQ(qr->getNToReturn(), boost::optional<int64_t>(kNToReturn));
+ ASSERT_EQ(qr->isSingleBatch(), false);
ASSERT_EQ(qr->isExplain(), false);
ASSERT_EQ(qr->isSlaveOk(), false);
ASSERT_EQ(qr->isNoCursorTimeout(), false);
@@ -1639,8 +1627,8 @@ TEST(QueryRequestTest, ParseFromLegacyQueryTooNegativeNToReturn) {
BSONObj queryObj = fromjson(R"({
foo: 1
})");
- const NamespaceString nss("test.testns");
+ const NamespaceString nss("test.testns");
ASSERT_NOT_OK(
QueryRequest::fromLegacyQuery(
nss, queryObj, BSONObj(), 0, std::numeric_limits<int>::min(), QueryOption_Exhaust)
@@ -1651,11 +1639,11 @@ class QueryRequestTest : public ServiceContextTest {};
TEST_F(QueryRequestTest, ParseFromUUID) {
const CollectionUUID uuid = UUID::gen();
- const NamespaceString nss("test.testns");
+
NamespaceStringOrUUID nssOrUUID("test", uuid);
QueryRequest qr(nssOrUUID);
-
+ const NamespaceString nss("test.testns");
// Ensure a call to refreshNSS succeeds.
qr.refreshNSS(nss);
ASSERT_EQ(nss, qr.nss());
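The test changes above capture the core of the API shift: QueryRequest::makeFromFindCommand() no longer takes a NamespaceString and no longer returns a StatusWith; it takes a single command object that must carry '$db' and throws on invalid input. A minimal sketch of the new calling convention, illustrative only and built from helpers already used in this file (fromjson, ASSERT_BSONOBJ_EQ, ASSERT_THROWS_CODE), not part of the patch:

// Well-formed find command: '$db' is now required alongside the collection name.
BSONObj good = fromjson("{find: 'testns', filter: {a: 1}, '$db': 'test'}");
auto qr = QueryRequest::makeFromFindCommand(good, false /* isExplain */);
ASSERT_BSONOBJ_EQ(BSON("a" << 1), qr->getFilter());

// Malformed input no longer comes back as a bad Status; the IDL parser throws.
BSONObj bad = fromjson("{find: 'testns', filter: 3, '$db': 'test'}");
ASSERT_THROWS_CODE(QueryRequest::makeFromFindCommand(bad, false /* isExplain */),
                   DBException,
                   ErrorCodes::TypeMismatch);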
diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h
index 1a02a4e7ead..4d3c272f7fa 100644
--- a/src/mongo/db/query/query_solution.h
+++ b/src/mongo/db/query/query_solution.h
@@ -362,9 +362,6 @@ public:
*/
void setRoot(std::unique_ptr<QuerySolutionNode> root);
- // Any filters in root or below point into this object. Must be owned.
- BSONObj filterData;
-
// There are two known scenarios in which a query solution might potentially block:
//
// Sort stage:
diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp
index 71293ad89d7..4a35ec0bb3c 100644
--- a/src/mongo/db/s/config/initial_split_policy.cpp
+++ b/src/mongo/db/s/config/initial_split_policy.cpp
@@ -88,7 +88,7 @@ StringMap<std::vector<ShardId>> buildTagsToShardIdsMap(OperationContext* opCtx,
ShardType::ConfigNS,
BSONObj(),
BSONObj(),
- 0));
+ boost::none));
uassert(50986, str::stream() << "Could not find any shard documents", !shardDocs.docs.empty());
for (const auto& tag : tags) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp
index 2ac000c0075..83719560bc3 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp
@@ -103,7 +103,7 @@ void startTransactionWithNoopFind(OperationContext* opCtx,
BSONObjBuilder findCmdBuilder;
QueryRequest qr(nss);
qr.setBatchSize(0);
- qr.setWantMore(false);
+ qr.setSingleBatchField(true);
qr.asFindCommand(&findCmdBuilder);
auto res = runCommandInLocalTxn(
diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp
index 06b3d8228d7..adae158b28f 100644
--- a/src/mongo/dbtests/query_stage_subplan.cpp
+++ b/src/mongo/dbtests/query_stage_subplan.cpp
@@ -95,7 +95,9 @@ protected:
BSONObj cmdObj = fromjson(findCmd);
bool isExplain = false;
- auto qr = unittest::assertGet(QueryRequest::makeFromFindCommand(nss, cmdObj, isExplain));
+ // If there is no '$db', append it.
+ auto cmd = OpMsgRequest::fromDBAndBody("test", cmdObj).body;
+ auto qr = QueryRequest::makeFromFindCommand(cmd, isExplain, NamespaceString());
auto cq = unittest::assertGet(
CanonicalQuery::canonicalize(opCtx(),
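The "If there is no '$db', append it" fix-up above recurs throughout this patch: command objects built without a database name are wrapped with OpMsgRequest::fromDBAndBody() so the IDL parser sees the '$db' it now requires. A small sketch of that idiom, assuming a legacy-style command object that lacks '$db' (the field and collection names here are illustrative):

// Hypothetical command object with no '$db' field.
BSONObj cmdObj = fromjson("{find: 'coll', filter: {x: 1}}");
// fromDBAndBody() adds '$db': 'test' to the body before parsing.
BSONObj cmdWithDb = OpMsgRequest::fromDBAndBody("test", cmdObj).body;
auto qr = QueryRequest::makeFromFindCommand(cmdWithDb, false /* isExplain */);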
diff --git a/src/mongo/idl/basic_types.h b/src/mongo/idl/basic_types.h
index fb574d44a19..b0647e46733 100644
--- a/src/mongo/idl/basic_types.h
+++ b/src/mongo/idl/basic_types.h
@@ -47,7 +47,7 @@ namespace mongo {
class OptionalBool {
public:
static OptionalBool parseFromBSON(BSONElement element) {
- uassert(ErrorCodes::BadValue,
+ uassert(ErrorCodes::TypeMismatch,
str::stream() << "Field '" << element.fieldNameStringData()
<< "' should be a boolean value, but found: " << element.type(),
!element || element.type() == BSONType::Bool);
diff --git a/src/mongo/idl/idl_parser.cpp b/src/mongo/idl/idl_parser.cpp
index 0030f864380..89158281e92 100644
--- a/src/mongo/idl/idl_parser.cpp
+++ b/src/mongo/idl/idl_parser.cpp
@@ -335,4 +335,29 @@ std::vector<std::vector<std::uint8_t>> transformVector(const std::vector<ConstDa
return output;
}
+
+void noOpSerializer(bool, StringData fieldName, BSONObjBuilder* bob) {}
+
+void serializeBSONWhenNotEmpty(BSONObj obj, StringData fieldName, BSONObjBuilder* bob) {
+ if (!obj.isEmpty()) {
+ bob->append(fieldName, obj);
+ }
+}
+
+BSONObj parseOwnedBSON(BSONElement element) {
+    uassert(ErrorCodes::TypeMismatch,
+            str::stream() << "Expected field " << element.fieldNameStringData()
+                          << " to be of type object",
+            element.type() == BSONType::Object);
+    return element.Obj().getOwned();
+}
+
+bool parseBoolean(BSONElement element) {
+    uassert(ErrorCodes::TypeMismatch,
+            str::stream() << "Expected field " << element.fieldNameStringData()
+                          << " to be of type bool",
+            element.type() == BSONType::Bool);
+    return element.boolean();
+}
+
} // namespace mongo
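The four free functions added above are helper hooks for IDL-defined fields: a no-op serializer for inputs that are accepted but ignored, a serializer that omits empty objects, and owned-object/boolean parsers that fail with TypeMismatch. A brief usage sketch (the find_command.idl definitions that reference these helpers are outside this excerpt, and the field names here are illustrative):

BSONObj doc = BSON("filter" << BSON("a" << 1));
BSONObj filter = parseOwnedBSON(doc["filter"]);  // owned copy; throws TypeMismatch for non-objects

BSONObjBuilder bob;
serializeBSONWhenNotEmpty(filter, "filter", &bob);            // appended
serializeBSONWhenNotEmpty(BSONObj(), "$_resumeAfter", &bob);  // skipped: empty objects are omitted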
diff --git a/src/mongo/idl/idl_parser.h b/src/mongo/idl/idl_parser.h
index 4e58fbd576f..551c87bf309 100644
--- a/src/mongo/idl/idl_parser.h
+++ b/src/mongo/idl/idl_parser.h
@@ -249,4 +249,12 @@ std::vector<std::string> transformVector(const std::vector<StringData>& input);
std::vector<ConstDataRange> transformVector(const std::vector<std::vector<std::uint8_t>>& input);
std::vector<std::vector<std::uint8_t>> transformVector(const std::vector<ConstDataRange>& input);
+void noOpSerializer(bool, StringData fieldName, BSONObjBuilder* bob);
+
+void serializeBSONWhenNotEmpty(BSONObj obj, StringData fieldName, BSONObjBuilder* bob);
+
+BSONObj parseOwnedBSON(BSONElement element);
+
+bool parseBoolean(BSONElement element);
+
} // namespace mongo
diff --git a/src/mongo/idl/idl_test.cpp b/src/mongo/idl/idl_test.cpp
index da7e8ee6f18..f1b7503dd68 100644
--- a/src/mongo/idl/idl_test.cpp
+++ b/src/mongo/idl/idl_test.cpp
@@ -3068,5 +3068,49 @@ TEST(IDLTypeCommand, TestCommandWithIDLAnyTypeField) {
}
}
+TEST(IDLCommand, BasicNamespaceConstGetterCommand_TestNonConstGetterGeneration) {
+ IDLParserErrorContext ctxt("root");
+ const auto uuid = UUID::gen();
+ auto testDoc =
+ BSON(BasicNamespaceConstGetterCommand::kCommandName << uuid << "field1" << 3 << "$db"
+ << "db");
+
+ auto testStruct = BasicNamespaceConstGetterCommand::parse(ctxt, makeOMR(testDoc));
+ ASSERT_EQUALS(testStruct.getField1(), 3);
+ ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().get(), uuid);
+
+ // Verify that both const and non-const getters are generated.
+ assert_same_types<decltype(
+ std::declval<BasicNamespaceConstGetterCommand>().getNamespaceOrUUID()),
+ NamespaceStringOrUUID&>();
+ assert_same_types<
+ decltype(std::declval<const BasicNamespaceConstGetterCommand>().getNamespaceOrUUID()),
+ const NamespaceStringOrUUID&>();
+
+    // Test that we can round-trip the just-parsed document.
+ {
+ BSONObjBuilder builder;
+ OpMsgRequest reply = testStruct.serialize(BSONObj());
+
+ ASSERT_BSONOBJ_EQ(testDoc, reply.body);
+ }
+
+    // Test that the mutable getter modifies the command object.
+ {
+ auto& nssOrUuid = testStruct.getNamespaceOrUUID();
+ const auto nss = NamespaceString("test.coll");
+ nssOrUuid.setNss(nss);
+ nssOrUuid.preferNssForSerialization();
+
+ BSONObjBuilder builder;
+ testStruct.serialize(BSONObj(), &builder);
+
+        // Verify that the nss, rather than the uuid, was used for serialization.
+ ASSERT_BSONOBJ_EQ(builder.obj(),
+ BSON(BasicNamespaceConstGetterCommand::kCommandName << "coll"
+ << "field1" << 3));
+ }
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/idl/unittest.idl b/src/mongo/idl/unittest.idl
index cb1ffa783f3..4b301e5cf44 100644
--- a/src/mongo/idl/unittest.idl
+++ b/src/mongo/idl/unittest.idl
@@ -674,6 +674,14 @@ commands:
field1: int
field2: string
+ BasicNamespaceConstGetterCommand:
+ description: UnitTest for a basic concatenate_with_db_or_uuid command
+ command_name: BasicNamespaceConstGetterCommand
+ namespace: concatenate_with_db_or_uuid
+ non_const_getter: true
+ fields:
+ field1: int
+
KnownFieldCommand:
description: UnitTest for a command that has a field that is special known generic command field
command_name: KnownFieldCommand
diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp
index 2081f9ec8b3..7680a4e482f 100644
--- a/src/mongo/s/balancer_configuration_test.cpp
+++ b/src/mongo/s/balancer_configuration_test.cpp
@@ -79,10 +79,8 @@ protected:
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), "config.settings");
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss().ns(), "config.settings");
ASSERT_BSONOBJ_EQ(query->getFilter(), BSON("_id" << key));
diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
index 63fed92f4d9..85ec4605657 100644
--- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp
@@ -99,10 +99,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, CollectionType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
// Ensure the query is correct
ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
@@ -168,13 +166,11 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) {
});
onFindWithMetadataCommand([this, &expectedDb, newOpTime](const RemoteCommandRequest& request) {
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, DatabaseType::ConfigNS);
-
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), DatabaseType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(DatabaseType::name(expectedDb.getName())));
@@ -302,10 +298,8 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, ShardType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), ShardType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
@@ -401,10 +395,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, ChunkType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), ChunkType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
@@ -460,10 +452,8 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSNoSortNoLimit) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, ChunkType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), ChunkType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery);
@@ -774,10 +764,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, CollectionType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
@@ -823,10 +811,8 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, CollectionType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
{
@@ -859,13 +845,11 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) {
validColl.setUnique(true);
onFindCommand([this, validColl](const RemoteCommandRequest& request) {
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, CollectionType::ConfigNS);
-
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), CollectionType::ConfigNS);
{
@@ -900,10 +884,8 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, DatabaseType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), DatabaseType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(),
@@ -970,10 +952,8 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) {
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss, TagsType::ConfigNS);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss(), TagsType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(TagsType::ns("TestDB.TestColl")));
@@ -1317,10 +1297,9 @@ TEST_F(ShardingCatalogClientTest, GetNewKeys) {
ASSERT_EQ("config:123", request.target.toString());
ASSERT_EQ("admin", request.dbname);
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(KeysCollectionDocument::ConfigNS, nss);
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
BSONObj expectedQuery(
fromjson("{purpose: 'none',"
@@ -1370,10 +1349,8 @@ TEST_F(ShardingCatalogClientTest, GetNewKeysWithEmptyCollection) {
ASSERT_EQ("config:123", request.target.toString());
ASSERT_EQ("admin", request.dbname);
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(KeysCollectionDocument::ConfigNS, nss);
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
BSONObj expectedQuery(
fromjson("{purpose: 'none',"
diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
index f7bae8c4d2b..6291f814fdf 100644
--- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
+++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp
@@ -145,6 +145,13 @@ TEST_F(InsertRetryTest, RetryOnNetworkErrorFails) {
future.default_timed_get();
}
+void assertFindRequestHasFilter(const RemoteCommandRequest& request, BSONObj filter) {
+ // If there is no '$db', append it.
+ auto cmd = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj).body;
+ auto query = QueryRequest::makeFromFindCommand(cmd, false);
+ ASSERT_BSONOBJ_EQ(filter, query->getFilter());
+}
+
TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMatch) {
configTargeter()->setFindHostReturnValue({kTestHosts[0]});
@@ -173,9 +180,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMatch) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[1]);
- auto query =
- assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
+ assertFindRequestHasFilter(request, BSON("_id" << 1));
return vector<BSONObj>{objToInsert};
});
@@ -211,10 +216,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorNotFound) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[1]);
- auto query =
- assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
-
+ assertFindRequestHasFilter(request, BSON("_id" << 1));
return vector<BSONObj>();
});
@@ -249,9 +251,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterNetworkErrorMismatch) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[1]);
- auto query =
- assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
+ assertFindRequestHasFilter(request, BSON("_id" << 1));
return vector<BSONObj>{BSON("_id" << 1 << "Value"
<< "TestValue has changed")};
@@ -305,9 +305,7 @@ TEST_F(InsertRetryTest, DuplicateKeyErrorAfterWriteConcernFailureMatch) {
onFindCommand([&](const RemoteCommandRequest& request) {
ASSERT_EQ(request.target, kTestHosts[0]);
- auto query =
- assertGet(QueryRequest::makeFromFindCommand(kTestNamespace, request.cmdObj, false));
- ASSERT_BSONOBJ_EQ(BSON("_id" << 1), query->getFilter());
+ assertFindRequestHasFilter(request, BSON("_id" << 1));
return vector<BSONObj>{objToInsert};
});
diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp
index fed17821f42..619e4f84077 100644
--- a/src/mongo/s/catalog_cache_refresh_test.cpp
+++ b/src/mongo/s/catalog_cache_refresh_test.cpp
@@ -609,8 +609,8 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
// recreated it with different epoch and chunks.
expectGetCollection(oldVersion.epoch(), shardKeyPattern);
onFindCommand([&](const RemoteCommandRequest& request) {
- const auto diffQuery =
- assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto diffQuery = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_BSONOBJ_EQ(BSON("ns" << kNss.ns() << "lastmod"
<< BSON("$gte" << Timestamp(oldVersion.majorVersion(),
oldVersion.minorVersion()))),
@@ -641,8 +641,8 @@ TEST_F(CatalogCacheRefreshTest, ChunkEpochChangeDuringIncrementalLoadRecoveryAft
onFindCommand([&](const RemoteCommandRequest& request) {
// Ensure it is a differential query but starting from version zero (to fetch all the
// chunks) since the incremental refresh above produced a different version
- const auto diffQuery =
- assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto diffQuery = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_BSONOBJ_EQ(BSON("ns" << kNss.ns() << "lastmod" << BSON("$gte" << Timestamp(0, 0))),
diffQuery->getFilter());
@@ -695,8 +695,8 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterCollectionEpochChange) {
// Return set of chunks, which represent a split
onFindCommand([&](const RemoteCommandRequest& request) {
// Ensure it is a differential query but starting from version zero
- const auto diffQuery =
- assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto diffQuery = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_BSONOBJ_EQ(BSON("ns" << kNss.ns() << "lastmod" << BSON("$gte" << Timestamp(0, 0))),
diffQuery->getFilter());
@@ -741,8 +741,8 @@ TEST_F(CatalogCacheRefreshTest, IncrementalLoadAfterSplit) {
// Return set of chunks, which represent a split
onFindCommand([&](const RemoteCommandRequest& request) {
// Ensure it is a differential query
- const auto diffQuery =
- assertGet(QueryRequest::makeFromFindCommand(kNss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto diffQuery = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_BSONOBJ_EQ(
BSON("ns" << kNss.ns() << "lastmod"
<< BSON("$gte" << Timestamp(version.majorVersion(), version.minorVersion()))),
diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp
index eaea181f039..6487164a274 100644
--- a/src/mongo/s/client/shard_remote.cpp
+++ b/src/mongo/s/client/shard_remote.cpp
@@ -379,7 +379,7 @@ StatusWith<Shard::QueryResponse> ShardRemote::_exhaustiveFindOnConfig(
qr.setFilter(query);
qr.setSort(sort);
qr.setReadConcern(readConcernObj);
- qr.setLimit(limit);
+ qr.setLimit(limit ? static_cast<boost::optional<std::int64_t>>(*limit) : boost::none);
if (maxTimeMS < Milliseconds::max()) {
qr.setMaxTimeMS(durationCount<Milliseconds>(maxTimeMS));
diff --git a/src/mongo/s/cluster_identity_loader_test.cpp b/src/mongo/s/cluster_identity_loader_test.cpp
index fbdab8f97b1..b46a7a234c4 100644
--- a/src/mongo/s/cluster_identity_loader_test.cpp
+++ b/src/mongo/s/cluster_identity_loader_test.cpp
@@ -76,10 +76,8 @@ public:
ASSERT_BSONOBJ_EQ(getReplSecondaryOkMetadata(),
rpc::TrackingMetadata::removeTrackingData(request.metadata));
- const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
- ASSERT_EQ(nss.ns(), "config.version");
-
- auto query = assertGet(QueryRequest::makeFromFindCommand(nss, request.cmdObj, false));
+ auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
+ auto query = QueryRequest::makeFromFindCommand(opMsg.body, false);
ASSERT_EQ(query->nss().ns(), "config.version");
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
diff --git a/src/mongo/s/commands/cluster_find_cmd.cpp b/src/mongo/s/commands/cluster_find_cmd.cpp
index 05ed5c43e7a..dbfd1d539ef 100644
--- a/src/mongo/s/commands/cluster_find_cmd.cpp
+++ b/src/mongo/s/commands/cluster_find_cmd.cpp
@@ -61,9 +61,8 @@ std::unique_ptr<QueryRequest> parseCmdObjectToQueryRequest(OperationContext* opC
NamespaceString nss,
BSONObj cmdObj,
bool isExplain) {
- auto qr = uassertStatusOK(
- QueryRequest::makeFromFindCommand(std::move(nss), std::move(cmdObj), isExplain));
- if (qr->getReadConcern().isEmpty()) {
+ auto qr = QueryRequest::makeFromFindCommand(std::move(cmdObj), isExplain, std::move(nss));
+ if (!qr->getReadConcern()) {
if (opCtx->isStartingMultiDocumentTransaction() || !opCtx->inMultiDocumentTransaction()) {
// If there is no explicit readConcern in the cmdObj, and this is either the first
// operation in a transaction, or not running in a transaction, then use the readConcern
diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp
index de9b1fa144f..0cb7b9b8e31 100644
--- a/src/mongo/s/query/cluster_find.cpp
+++ b/src/mongo/s/query/cluster_find.cpp
@@ -90,7 +90,7 @@ const char kFindCmdName[] = "find";
StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
const QueryRequest& qr, bool appendGeoNearDistanceProjection) {
// If there is a limit, we forward the sum of the limit and the skip.
- boost::optional<long long> newLimit;
+ boost::optional<int64_t> newLimit;
if (qr.getLimit()) {
long long newLimitValue;
if (overflow::add(*qr.getLimit(), qr.getSkip().value_or(0), &newLimitValue)) {
@@ -104,11 +104,12 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
}
// Similarly, if nToReturn is set, we forward the sum of nToReturn and the skip.
- boost::optional<long long> newNToReturn;
+ boost::optional<int64_t> newNToReturn;
if (qr.getNToReturn()) {
- // !wantMore and ntoreturn mean the same as !wantMore and limit, so perform the conversion.
- if (!qr.wantMore()) {
- long long newLimitValue;
+    // When 'singleBatch' is set, ntoreturn is equivalent to limit, so perform the
+    // conversion.
+ if (qr.isSingleBatch()) {
+ int64_t newLimitValue;
if (overflow::add(*qr.getNToReturn(), qr.getSkip().value_or(0), &newLimitValue)) {
return Status(ErrorCodes::Overflow,
str::stream()
@@ -118,7 +119,7 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
}
newLimit = newLimitValue;
} else {
- long long newNToReturnValue;
+ int64_t newNToReturnValue;
if (overflow::add(*qr.getNToReturn(), qr.getSkip().value_or(0), &newNToReturnValue)) {
return Status(ErrorCodes::Overflow,
str::stream()
@@ -153,10 +154,10 @@ StatusWith<std::unique_ptr<QueryRequest>> transformQueryForShards(
newQR->setLimit(newLimit);
newQR->setNToReturn(newNToReturn);
- // Even if the client sends us singleBatch=true (wantMore=false), we may need to retrieve
+ // Even if the client sends us singleBatch=true, we may need to retrieve
// multiple batches from a shard in order to return the single requested batch to the client.
- // Therefore, we must always send singleBatch=false (wantMore=true) to the shards.
- newQR->setWantMore(true);
+ // Therefore, we must always send singleBatch=false to the shards.
+ newQR->setSingleBatchField(false);
// Any expansion of the 'showRecordId' flag should have already happened on mongos.
newQR->setShowRecordId(false);
@@ -379,7 +380,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
ccc->detachFromOperationContext();
- if (!query.getQueryRequest().wantMore() && !ccc->isTailable()) {
+ if (query.getQueryRequest().isSingleBatch() && !ccc->isTailable()) {
cursorState = ClusterCursorManager::CursorState::Exhausted;
}
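For the forwarded-limit arithmetic in the hunk above: mongos sends each shard the sum of the client's limit (or ntoreturn) and skip, treating overflow of the 64-bit sum as an error, and always clears singleBatch on the shard-side request. A condensed sketch of the limit branch, mirroring transformQueryForShards() above rather than adding anything to the patch:

// overflow::add() returns true when the 64-bit sum would overflow.
boost::optional<int64_t> newLimit;
if (qr.getLimit()) {
    int64_t sum;
    if (overflow::add(*qr.getLimit(), qr.getSkip().value_or(0), &sum)) {
        return Status(ErrorCodes::Overflow,
                      "sum of limit and skip cannot be represented as a 64-bit integer");
    }
    newLimit = sum;
}
// The shards always receive singleBatch=false; mongos assembles the client's single batch itself.
newQR->setLimit(newLimit);
newQR->setSingleBatchField(false);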
diff --git a/src/mongo/s/query/results_merger_test_fixture.h b/src/mongo/s/query/results_merger_test_fixture.h
index 8fc20eb2903..d920ba7266c 100644
--- a/src/mongo/s/query/results_merger_test_fixture.h
+++ b/src/mongo/s/query/results_merger_test_fixture.h
@@ -70,8 +70,9 @@ protected:
if (findCmd) {
- const auto qr = unittest::assertGet(
- QueryRequest::makeFromFindCommand(kTestNss, *findCmd, false /* isExplain */));
+ // If there is no '$db', append it.
+ auto cmd = OpMsgRequest::fromDBAndBody(kTestNss.db(), *findCmd).body;
+ const auto qr = QueryRequest::makeFromFindCommand(cmd, false /* isExplain */, kTestNss);
if (!qr->getSort().isEmpty()) {
params.setSort(qr->getSort().getOwned());
}
diff --git a/src/mongo/s/sessions_collection_sharded.cpp b/src/mongo/s/sessions_collection_sharded.cpp
index 22915bd2c0a..eccd1a732f4 100644
--- a/src/mongo/s/sessions_collection_sharded.cpp
+++ b/src/mongo/s/sessions_collection_sharded.cpp
@@ -167,8 +167,12 @@ LogicalSessionIdSet SessionsCollectionSharded::findRemovedSessions(
OperationContext* opCtx, const LogicalSessionIdSet& sessions) {
auto send = [&](BSONObj toSend) -> BSONObj {
- auto qr = uassertStatusOK(QueryRequest::makeFromFindCommand(
- NamespaceString::kLogicalSessionsNamespace, toSend, false));
+ // If there is no '$db', append it.
+ toSend =
+ OpMsgRequest::fromDBAndBody(NamespaceString::kLogicalSessionsNamespace.db(), toSend)
+ .body;
+ auto qr = QueryRequest::makeFromFindCommand(
+ toSend, false, NamespaceString::kLogicalSessionsNamespace);
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto cq = uassertStatusOK(
diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp
index 04c652dcd8d..670620bf52e 100644
--- a/src/mongo/s/sharding_router_test_fixture.cpp
+++ b/src/mongo/s/sharding_router_test_fixture.cpp
@@ -255,10 +255,9 @@ void ShardingTestFixture::expectGetShards(const std::vector<ShardType>& shards)
const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String());
ASSERT_EQ(nss, ShardType::ConfigNS);
- auto queryResult = QueryRequest::makeFromFindCommand(nss, request.cmdObj, false);
- ASSERT_OK(queryResult.getStatus());
-
- const auto& query = queryResult.getValue();
+ // If there is no '$db', append it.
+ auto cmd = OpMsgRequest::fromDBAndBody(nss.db(), request.cmdObj).body;
+ auto query = QueryRequest::makeFromFindCommand(cmd, false, nss);
ASSERT_EQ(query->nss(), ShardType::ConfigNS);
ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj());
diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp
index 71c7d8349f3..6aaf8a679ad 100644
--- a/src/mongo/shell/bench.cpp
+++ b/src/mongo/shell/bench.cpp
@@ -244,7 +244,7 @@ int runQueryWithReadCommands(DBClientBase* conn,
int count = cursorResponse.getBatch().size();
if (objOut) {
- invariant(qr->getLimit() && *qr->getLimit() == 1 && !qr->wantMore());
+ invariant(qr->getLimit() && *qr->getLimit() == 1 && qr->isSingleBatch());
// Since this is a "single batch" query, we can simply grab the first item in the result set
// and return here.
*objOut = (count > 0) ? cursorResponse.getBatch()[0] : BSONObj();
@@ -294,7 +294,7 @@ Timestamp getLatestClusterTime(DBClientBase* conn) {
auto qr = std::make_unique<QueryRequest>(NamespaceString("local.oplog.rs"));
qr->setSort(BSON("$natural" << -1));
qr->setLimit(1LL);
- qr->setWantMore(false);
+ qr->setSingleBatchField(true);
invariant(qr->validate());
const auto dbName = qr->nss().db().toString();
@@ -995,7 +995,7 @@ void BenchRunOp::executeOnce(DBClientBase* conn,
qr->setFilter(fixedQuery);
qr->setProj(this->projection);
qr->setLimit(1LL);
- qr->setWantMore(false);
+ qr->setSingleBatchField(true);
if (config.useSnapshotReads) {
qr->setReadConcern(readConcernSnapshot);
}