diff options
author | Irina Yatsenko <irina.yatsenko@mongodb.com> | 2021-08-16 15:35:51 +0000 |
---|---|---|
committer | Evergreen Agent <no-reply@evergreen.mongodb.com> | 2021-08-23 19:44:53 +0000 |
commit | df329d8f46e1485dd5d70379f9c48bf4175f0d5a (patch) | |
tree | 80adf0442b021bdd689d59697a6b85ebf4dab24d /src | |
parent | 5cf8a293567989fcc970fb21cde4a1af111c8b58 (diff) | |
download | mongo-df329d8f46e1485dd5d70379f9c48bf4175f0d5a.tar.gz |
SERVER-58670 Tighten up what kind of BSON the 'Query' type can be wrapped around
This refactor includes:
Remove dead code from 'Query' type and reduce its public interface.
Split query argument in query/update/remove methods into filter BSON and settings (which are still passed around as 'Query' type).
Remove Query(string) constructors.
Remove most callers of 'Query(const BSONObj&)'.
Replace public 'Query(const BSONObj&)' and 'Query.obj' with an explicit factory method and a getter.
Diffstat (limited to 'src')
106 files changed, 1219 insertions, 1057 deletions
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp index c0018211757..dce2f0a5b2a 100644 --- a/src/mongo/client/dbclient_base.cpp +++ b/src/mongo/client/dbclient_base.cpp @@ -603,12 +603,14 @@ bool DBClientBase::exists(const string& ns) { } BSONObj DBClientBase::findOne(const string& ns, - const Query& query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn, int queryOptions, boost::optional<BSONObj> readConcernObj) { unique_ptr<DBClientCursor> c = this->query(NamespaceString(ns), - query, + filter, + querySettings, 1 /*limit*/, 0 /*nToSkip*/, fieldsToReturn, @@ -619,7 +621,8 @@ BSONObj DBClientBase::findOne(const string& ns, // query() throws on network error so OK to uassert with numeric code here. uassert(10276, str::stream() << "DBClientBase::findN: transport error: " << getServerAddress() - << " ns: " << ns << " query: " << query.toString(), + << " ns: " << ns << " filter: " << filter.toString() + << " query settings: " << querySettings.getFullSettingsDeprecated(), c.get()); return c->more() ? 
c->nextSafe() : BSONObj(); @@ -668,7 +671,8 @@ std::pair<BSONObj, NamespaceString> DBClientBase::findOneByUUID( const uint64_t DBClientBase::INVALID_SOCK_CREATION_TIME = std::numeric_limits<uint64_t>::max(); unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, @@ -677,7 +681,8 @@ unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOr boost::optional<BSONObj> readConcernObj) { unique_ptr<DBClientCursor> c(new DBClientCursor(this, nsOrUuid, - query.obj, + filter, + querySettings, limit, nToSkip, fieldsToReturn, @@ -708,7 +713,8 @@ struct DBClientFunConvertor { unsigned long long DBClientBase::query(std::function<void(const BSONObj&)> f, const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn, int queryOptions, int batchSize, @@ -716,13 +722,20 @@ unsigned long long DBClientBase::query(std::function<void(const BSONObj&)> f, DBClientFunConvertor fun; fun._f = f; std::function<void(DBClientCursorBatchIterator&)> ptr(fun); - return this->query( - ptr, nsOrUuid, std::move(query), fieldsToReturn, queryOptions, batchSize, readConcernObj); + return this->query(ptr, + nsOrUuid, + filter, + querySettings, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj); } unsigned long long DBClientBase::query(std::function<void(DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn, int queryOptions, int batchSize, @@ -730,8 +743,15 @@ unsigned long long DBClientBase::query(std::function<void(DBClientCursorBatchIte // mask options queryOptions &= (int)(QueryOption_NoCursorTimeout | QueryOption_SecondaryOk); - unique_ptr<DBClientCursor> c(this->query( - nsOrUuid, query, 0, 0, fieldsToReturn, 
queryOptions, batchSize, readConcernObj)); + unique_ptr<DBClientCursor> c(this->query(nsOrUuid, + filter, + querySettings, + 0, + 0, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj)); // query() throws on network error so OK to uassert with numeric code here. uassert(16090, "socket error for mapping query", c.get()); @@ -764,8 +784,8 @@ OpMsgRequest createInsertRequest(const string& ns, } OpMsgRequest createUpdateRequest(const string& ns, - Query query, - BSONObj obj, + const BSONObj& filter, + BSONObj updateSpec, bool upsert, bool multi, boost::optional<BSONObj> writeConcernObj) { @@ -779,13 +799,13 @@ OpMsgRequest createUpdateRequest(const string& ns, auto request = OpMsgRequest::fromDBAndBody(nss.db(), cmdBuilder.obj()); request.sequences.push_back( {"updates", - {BSON("q" << query.obj << "u" << obj << "upsert" << upsert << "multi" << multi)}}); + {BSON("q" << filter << "u" << updateSpec << "upsert" << upsert << "multi" << multi)}}); return request; } OpMsgRequest createRemoveRequest(const string& ns, - Query obj, + const BSONObj& filter, bool removeMany, boost::optional<BSONObj> writeConcernObj) { const int limit = removeMany ? 
0 : 1; @@ -797,7 +817,7 @@ OpMsgRequest createRemoveRequest(const string& ns, cmdBuilder.append(WriteConcernOptions::kWriteConcernField, *writeConcernObj); } auto request = OpMsgRequest::fromDBAndBody(nss.db(), cmdBuilder.obj()); - request.sequences.push_back({"deletes", {BSON("q" << obj.obj << "limit" << limit)}}); + request.sequences.push_back({"deletes", {BSON("q" << filter << "limit" << limit)}}); return request; } @@ -828,40 +848,40 @@ void DBClientBase::insert(const string& ns, } BSONObj DBClientBase::removeAcknowledged(const string& ns, - Query obj, + const BSONObj& filter, bool removeMany, boost::optional<BSONObj> writeConcernObj) { - OpMsgRequest request = createRemoveRequest(ns, obj, removeMany, writeConcernObj); + OpMsgRequest request = createRemoveRequest(ns, filter, removeMany, writeConcernObj); rpc::UniqueReply reply = runCommand(std::move(request)); return reply->getCommandReply(); } void DBClientBase::remove(const string& ns, - Query obj, + const BSONObj& filter, bool removeMany, boost::optional<BSONObj> writeConcernObj) { - auto request = createRemoveRequest(ns, obj, removeMany, writeConcernObj); + auto request = createRemoveRequest(ns, filter, removeMany, writeConcernObj); runFireAndForgetCommand(std::move(request)); } BSONObj DBClientBase::updateAcknowledged(const string& ns, - Query query, - BSONObj obj, + const BSONObj& filter, + BSONObj updateSpec, bool upsert, bool multi, boost::optional<BSONObj> writeConcernObj) { - auto request = createUpdateRequest(ns, query, obj, upsert, multi, writeConcernObj); + auto request = createUpdateRequest(ns, filter, updateSpec, upsert, multi, writeConcernObj); rpc::UniqueReply reply = runCommand(std::move(request)); return reply->getCommandReply(); } void DBClientBase::update(const string& ns, - Query query, - BSONObj obj, + const BSONObj& filter, + BSONObj updateSpec, bool upsert, bool multi, boost::optional<BSONObj> writeConcernObj) { - auto request = createUpdateRequest(ns, query, obj, upsert, multi, 
writeConcernObj); + auto request = createUpdateRequest(ns, filter, updateSpec, upsert, multi, writeConcernObj); runFireAndForgetCommand(std::move(request)); } diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h index 56227d92b40..5d339afc130 100644 --- a/src/mongo/client/dbclient_base.h +++ b/src/mongo/client/dbclient_base.h @@ -519,9 +519,16 @@ public: /** * Returns a single object that matches the query. if none do, then the object is empty. * Throws AssertionException. + * + * The 'querySettings' argument might contain a subset of query settings, such as sort, hint, + * etc. If the passed in 'querySettings' object also includes a filter (in its 'query'/'$query' + * field), the filter will be ignored. Pass in the desired filter's BSON as 'filter' instead. + * The other options parameters exist for historic reasons and will be eventually combined with + * 'querySettings' into a single 'QueryOptions' parameter. */ virtual BSONObj findOne(const std::string& ns, - const Query& query, + const BSONObj& filter, + const Query& querySettings = Query(), const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, boost::optional<BSONObj> readConcernObj = boost::none); @@ -544,19 +551,28 @@ public: * Sends a query to the database. * * 'ns': Namespace to query, format is <dbname>.<collectname>[.<collectname>]* - * 'query': Query to perform on the collection. + * 'filter': Query to perform on the collection. + * 'querySettings': sort, hint, readPref, etc. * 'limit': The maximum number of documents that the cursor should return. 0 = unlimited. * 'nToSkip': Start with the nth item. * 'fieldsToReturn': Optional template of which fields to select. If unspecified, returns all * fields. * 'queryOptions': See options enum at top of this file. * + * Notes: + * The 'querySettings' argument might contain a subset of query settings, such as sort, hint, + * etc. 
If the passed in 'querySettings' object also includes a filter (in its 'query'/'$query' + * field), the filter will be ignored. Pass in the desired filter's BSON as 'filter' instead. + * The other options parameters exist for historic reasons and will be eventually combined with + * 'querySettings' into a single 'QueryOptions' parameter. + * * Returns nullptr if error (connection failure). * Throws AssertionException. */ virtual std::unique_ptr<DBClientCursor> query( const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings = Query(), int limit = 0, int nToSkip = 0, const BSONObj* fieldsToReturn = nullptr, @@ -575,13 +591,20 @@ public: * Use the DBClientCursorBatchIterator version, below, if you want to do items in large * blocks, perhaps to avoid granular locking and such. * - * Note: + * Notes: * The version that takes a BSONObj cannot return the namespace queried when the query is done * by UUID. If this is required, use the DBClientBatchIterator version. + * + * The 'querySettings' argument might contain a subset of query settings, such as sort, hint, + * etc. If the passed in 'querySettings' object also includes a filter (in its 'query'/'$query' + * field), the filter will be ignored. Pass in the desired filter's BSON as 'filter' instead. + * The other options parameters exist for historic reasons and will be eventually combined with + * 'querySettings' into a single 'QueryOptions' parameter. 
*/ unsigned long long query(std::function<void(const BSONObj&)> f, const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings = Query(), const BSONObj* fieldsToReturn = nullptr, int queryOptions = QueryOption_Exhaust, int batchSize = 0, @@ -589,7 +612,8 @@ public: virtual unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings = Query(), const BSONObj* fieldsToReturn = nullptr, int queryOptions = QueryOption_Exhaust, int batchSize = 0, @@ -642,8 +666,8 @@ public: * Executes an acknowledged command to update the objects that match the query. */ virtual BSONObj updateAcknowledged(const std::string& ns, - Query query, - BSONObj obj, + const BSONObj& filter, + BSONObj updateSpec, bool upsert = false, bool multi = false, boost::optional<BSONObj> writeConcernObj = boost::none); @@ -652,8 +676,8 @@ public: * Executes a fire-and-forget command to update the objects that match the query. */ virtual void update(const std::string& ns, - Query query, - BSONObj obj, + const BSONObj& filter, + BSONObj updateSpec, bool upsert = false, bool multi = false, boost::optional<BSONObj> writeConcernObj = boost::none); @@ -662,7 +686,7 @@ public: * Executes an acknowledged command to remove the objects that match the query. */ virtual BSONObj removeAcknowledged(const std::string& ns, - Query query, + const BSONObj& filter, bool removeMany = true, boost::optional<BSONObj> writeConcernObj = boost::none); @@ -670,7 +694,7 @@ public: * Executes a fire-and-forget command to remove the objects that match the query. 
*/ virtual void remove(const std::string& ns, - Query query, + const BSONObj& filter, bool removeMany = true, boost::optional<BSONObj> writeConcernObj = boost::none); diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp index b50487dcbfa..9770437645b 100644 --- a/src/mongo/client/dbclient_connection.cpp +++ b/src/mongo/client/dbclient_connection.cpp @@ -631,22 +631,36 @@ uint64_t DBClientConnection::getSockCreationMicroSec() const { unsigned long long DBClientConnection::query(std::function<void(DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn, int queryOptions, int batchSize, boost::optional<BSONObj> readConcernObj) { if (!(queryOptions & QueryOption_Exhaust) || !(availableOptions() & QueryOption_Exhaust)) { - return DBClientBase::query( - f, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize, readConcernObj); + return DBClientBase::query(f, + nsOrUuid, + filter, + querySettings, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj); } // mask options queryOptions &= (int)(QueryOption_NoCursorTimeout | QueryOption_SecondaryOk | QueryOption_Exhaust); - unique_ptr<DBClientCursor> c(this->query( - nsOrUuid, query, 0, 0, fieldsToReturn, queryOptions, batchSize, readConcernObj)); + unique_ptr<DBClientCursor> c(this->query(nsOrUuid, + filter, + querySettings, + 0, + 0, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj)); // Note that this->query will throw for network errors, so it is OK to return a numeric // error code here. 
uassert(13386, "socket error for mapping query", c.get()); diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h index b6d6aef8169..5fc6d464ea9 100644 --- a/src/mongo/client/dbclient_connection.h +++ b/src/mongo/client/dbclient_connection.h @@ -148,7 +148,8 @@ public: std::unique_ptr<DBClientCursor> query( const NamespaceStringOrUUID& nsOrUuid, - Query query = Query(), + const BSONObj& filter, + const Query& querySettings = Query(), int limit = 0, int nToSkip = 0, const BSONObj* fieldsToReturn = nullptr, @@ -157,7 +158,8 @@ public: boost::optional<BSONObj> readConcernObj = boost::none) override { checkConnection(); return DBClientBase::query(nsOrUuid, - query, + filter, + querySettings, limit, nToSkip, fieldsToReturn, @@ -166,9 +168,10 @@ public: readConcernObj); } - unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f, + unsigned long long query(std::function<void(DBClientCursorBatchIterator&)>, const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn, int queryOptions, int batchSize = 0, diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp index afd2d5709cf..322002a6981 100644 --- a/src/mongo/client/dbclient_cursor.cpp +++ b/src/mongo/client/dbclient_cursor.cpp @@ -95,51 +95,54 @@ Message DBClientCursor::_assembleInit() { } // If we haven't gotten a cursorId yet, we need to issue a new query or command. - // The caller supplies a 'query' object which may have $-prefixed directives in the format - // expected for a legacy OP_QUERY. Therefore, we use the legacy parsing code supplied by - // query_request_helper. When actually issuing the request to the remote node, we will - // assemble a find command. - auto findCommand = query_request_helper::fromLegacyQuery( - _nsOrUuid, query, fieldsToReturn ? 
*fieldsToReturn : BSONObj(), nToSkip, opts); + // The caller supplies a filter and a query settings object which may have $-prefixed directives + // in the format previously expected for a legacy OP_QUERY. Therefore, we use the legacy parsing + // code supplied by query_request_helper. When actually issuing the request to the remote node, + // we will assemble a find command. + auto findCommand = + query_request_helper::fromLegacyQuery(_nsOrUuid, + _filter, + _querySettings, + fieldsToReturn ? *fieldsToReturn : BSONObj(), + nToSkip, + opts); // If there was a problem building the query request, report that. uassertStatusOK(findCommand.getStatus()); - // Despite the request being generated using the legacy OP_QUERY format above, we will never set - // the 'ntoreturn' parameter on the find command request, since this is an OP_QUERY-specific - // concept. Instead, we always use 'batchSize' and 'limit', which are provided separately to us - // by the client. if (limit) { findCommand.getValue()->setLimit(limit); } if (batchSize) { findCommand.getValue()->setBatchSize(batchSize); } - if (query.getBoolField("$readOnce")) { + + const BSONObj querySettings = _querySettings.getFullSettingsDeprecated(); + if (querySettings.getBoolField("$readOnce")) { // Legacy queries don't handle readOnce. findCommand.getValue()->setReadOnce(true); } - if (query.getBoolField(FindCommandRequest::kRequestResumeTokenFieldName)) { + if (querySettings.getBoolField(FindCommandRequest::kRequestResumeTokenFieldName)) { // Legacy queries don't handle requestResumeToken. findCommand.getValue()->setRequestResumeToken(true); } - if (query.hasField(FindCommandRequest::kResumeAfterFieldName)) { + if (querySettings.hasField(FindCommandRequest::kResumeAfterFieldName)) { // Legacy queries don't handle resumeAfter. 
findCommand.getValue()->setResumeAfter( - query.getObjectField(FindCommandRequest::kResumeAfterFieldName)); + querySettings.getObjectField(FindCommandRequest::kResumeAfterFieldName)); } - if (auto replTerm = query[FindCommandRequest::kTermFieldName]) { + if (auto replTerm = querySettings[FindCommandRequest::kTermFieldName]) { // Legacy queries don't handle term. findCommand.getValue()->setTerm(replTerm.numberLong()); } // Legacy queries don't handle readConcern. // We prioritize the readConcern parsed from the query object over '_readConcernObj'. - if (auto readConcern = query[repl::ReadConcernArgs::kReadConcernFieldName]) { + if (auto readConcern = querySettings[repl::ReadConcernArgs::kReadConcernFieldName]) { findCommand.getValue()->setReadConcern(readConcern.Obj()); } else if (_readConcernObj) { findCommand.getValue()->setReadConcern(_readConcernObj); } BSONObj cmd = findCommand.getValue()->toBSON(BSONObj()); - if (auto readPref = query["$readPreference"]) { + if (auto readPref = querySettings["$readPreference"]) { // FindCommandRequest doesn't handle $readPreference. 
cmd = BSONObjBuilder(std::move(cmd)).append(readPref).obj(); } @@ -375,7 +378,8 @@ void DBClientCursor::attach(AScopedConnection* conn) { DBClientCursor::DBClientCursor(DBClientBase* client, const NamespaceStringOrUUID& nsOrUuid, - const BSONObj& query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, @@ -384,7 +388,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client, boost::optional<BSONObj> readConcernObj) : DBClientCursor(client, nsOrUuid, - query, + filter, + querySettings, 0, // cursorId limit, nToSkip, @@ -405,7 +410,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client, boost::optional<BSONObj> postBatchResumeToken) : DBClientCursor(client, nsOrUuid, - BSONObj(), // query + BSONObj(), // filter + Query(), // querySettings cursorId, limit, 0, // nToSkip @@ -419,7 +425,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client, DBClientCursor::DBClientCursor(DBClientBase* client, const NamespaceStringOrUUID& nsOrUuid, - const BSONObj& query, + const BSONObj& filter, + const Query& querySettings, long long cursorId, int limit, int nToSkip, @@ -435,7 +442,8 @@ DBClientCursor::DBClientCursor(DBClientBase* client, _originalHost(_client->getServerAddress()), _nsOrUuid(nsOrUuid), ns(nsOrUuid.nss() ? 
*nsOrUuid.nss() : NamespaceString(nsOrUuid.dbname())), - query(query), + _filter(filter), + _querySettings(querySettings), limit(limit), nToSkip(nToSkip), fieldsToReturn(fieldsToReturn), diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h index b803207a08f..94c541e81f3 100644 --- a/src/mongo/client/dbclient_cursor.h +++ b/src/mongo/client/dbclient_cursor.h @@ -31,6 +31,7 @@ #include <stack> +#include "mongo/client/query.h" #include "mongo/db/dbmessage.h" #include "mongo/db/jsobj.h" #include "mongo/db/json.h" @@ -137,7 +138,8 @@ public: DBClientCursor(DBClientBase* client, const NamespaceStringOrUUID& nsOrUuid, - const BSONObj& query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, @@ -268,7 +270,8 @@ protected: private: DBClientCursor(DBClientBase* client, const NamespaceStringOrUUID& nsOrUuid, - const BSONObj& query, + const BSONObj& filter, + const Query& querySettings, long long cursorId, int limit, int nToSkip, @@ -287,7 +290,8 @@ private: // After a successful 'find' command, 'ns' is updated to contain the namespace returned by that // command. NamespaceString ns; - BSONObj query; + BSONObj _filter; + Query _querySettings; int limit; int nToSkip; const BSONObj* fieldsToReturn; diff --git a/src/mongo/client/dbclient_cursor_test.cpp b/src/mongo/client/dbclient_cursor_test.cpp index 39a62eda882..ab3a1d363d5 100644 --- a/src/mongo/client/dbclient_cursor_test.cpp +++ b/src/mongo/client/dbclient_cursor_test.cpp @@ -153,7 +153,8 @@ TEST_F(DBClientCursorTest, DBClientCursorCallsMetaDataReaderOncePerBatch) { // Set up the DBClientCursor and a mock client connection. 
DBClientConnectionForTest conn; const NamespaceString nss("test", "coll"); - DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), fromjson("{}"), 0, 0, nullptr, 0, 0); + DBClientCursor cursor( + &conn, NamespaceStringOrUUID(nss), BSONObj{}, Query(), 0, 0, nullptr, 0, 0); cursor.setBatchSize(2); // Set up mock 'find' response. @@ -199,8 +200,15 @@ TEST_F(DBClientCursorTest, DBClientCursorHandlesOpMsgExhaustCorrectly) { // Set up the DBClientCursor and a mock client connection. DBClientConnectionForTest conn; const NamespaceString nss("test", "coll"); - DBClientCursor cursor( - &conn, NamespaceStringOrUUID(nss), fromjson("{}"), 0, 0, nullptr, QueryOption_Exhaust, 0); + DBClientCursor cursor(&conn, + NamespaceStringOrUUID(nss), + BSONObj{}, + Query(), + 0, + 0, + nullptr, + QueryOption_Exhaust, + 0); cursor.setBatchSize(0); // Set up mock 'find' response. @@ -263,8 +271,15 @@ TEST_F(DBClientCursorTest, DBClientCursorResendsGetMoreIfMoreToComeFlagIsOmitted // Set up the DBClientCursor and a mock client connection. DBClientConnectionForTest conn; const NamespaceString nss("test", "coll"); - DBClientCursor cursor( - &conn, NamespaceStringOrUUID(nss), fromjson("{}"), 0, 0, nullptr, QueryOption_Exhaust, 0); + DBClientCursor cursor(&conn, + NamespaceStringOrUUID(nss), + BSONObj{}, + Query(), + 0, + 0, + nullptr, + QueryOption_Exhaust, + 0); cursor.setBatchSize(0); // Set up mock 'find' response. @@ -348,8 +363,15 @@ TEST_F(DBClientCursorTest, DBClientCursorMoreThrowsExceptionOnNonOKResponse) { // Set up the DBClientCursor and a mock client connection. DBClientConnectionForTest conn; const NamespaceString nss("test", "coll"); - DBClientCursor cursor( - &conn, NamespaceStringOrUUID(nss), fromjson("{}"), 0, 0, nullptr, QueryOption_Exhaust, 0); + DBClientCursor cursor(&conn, + NamespaceStringOrUUID(nss), + BSONObj{}, + Query(), + 0, + 0, + nullptr, + QueryOption_Exhaust, + 0); cursor.setBatchSize(0); // Set up mock 'find' response. 
@@ -380,8 +402,15 @@ TEST_F(DBClientCursorTest, DBClientCursorMoreThrowsExceptionWhenMoreToComeFlagSe // Set up the DBClientCursor and a mock client connection. DBClientConnectionForTest conn; const NamespaceString nss("test", "coll"); - DBClientCursor cursor( - &conn, NamespaceStringOrUUID(nss), fromjson("{}"), 0, 0, nullptr, QueryOption_Exhaust, 0); + DBClientCursor cursor(&conn, + NamespaceStringOrUUID(nss), + BSONObj{}, + Query(), + 0, + 0, + nullptr, + QueryOption_Exhaust, + 0); cursor.setBatchSize(0); // Set up mock 'find' response. @@ -416,7 +445,8 @@ TEST_F(DBClientCursorTest, DBClientCursorPassesReadOnceFlag) { const NamespaceString nss("test", "coll"); DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), - QUERY("query" << BSONObj() << "$readOnce" << true).obj, + BSONObj{}, + Query().readOnce(true), 0, 0, nullptr, @@ -448,9 +478,8 @@ TEST_F(DBClientCursorTest, DBClientCursorPassesResumeFields) { const NamespaceString nss("test", "coll"); DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), - QUERY("query" << BSONObj() << "$_requestResumeToken" << true - << "$_resumeAfter" << BSON("$recordId" << 5LL)) - .obj, + BSONObj{}, + Query().requestResumeToken(true).resumeAfter(BSON("$recordId" << 5LL)), 0, 0, nullptr, @@ -490,7 +519,8 @@ TEST_F(DBClientCursorTest, DBClientCursorTailable) { const NamespaceString nss("test", "coll"); DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), - fromjson("{}"), + BSONObj{}, + Query(), 0, 0, nullptr, @@ -589,7 +619,8 @@ TEST_F(DBClientCursorTest, DBClientCursorTailableAwaitData) { const NamespaceString nss("test", "coll"); DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), - fromjson("{}"), + BSONObj{}, + Query(), 0, 0, nullptr, @@ -655,7 +686,8 @@ TEST_F(DBClientCursorTest, DBClientCursorTailableAwaitDataExhaust) { const NamespaceString nss("test", "coll"); DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), - fromjson("{}"), + BSONObj{}, + Query(), 0, 0, nullptr, @@ -805,12 +837,11 @@ 
TEST_F(DBClientCursorTest, DBClientCursorOplogQuery) { const BSONObj readConcernObj = BSON("afterClusterTime" << Timestamp(0, 1)); const long long maxTimeMS = 5000LL; const long long term = 5; - const auto oplogQuery = QUERY("query" << filterObj << "readConcern" << readConcernObj - << "$maxTimeMS" << maxTimeMS << "term" << term); DBClientCursor cursor(&conn, NamespaceStringOrUUID(nss), - oplogQuery.obj, + filterObj, + Query().readConcern(readConcernObj).maxTimeMS(maxTimeMS).term(term), 0, 0, nullptr, diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp index 82457ebd7ac..e434b4634f9 100644 --- a/src/mongo/client/dbclient_rs.cpp +++ b/src/mongo/client/dbclient_rs.cpp @@ -105,14 +105,15 @@ const size_t MAX_RETRY = 3; * * @throws AssertionException if the read preference object is malformed */ -std::unique_ptr<ReadPreferenceSetting> _extractReadPref(const BSONObj& query, int queryOptions) { +std::unique_ptr<ReadPreferenceSetting> _extractReadPref(const Query& querySettings, + int queryOptions) { // Default read pref is primary only or secondary preferred with secondaryOK const auto defaultReadPref = queryOptions & QueryOption_SecondaryOk ? ReadPreference::SecondaryPreferred : ReadPreference::PrimaryOnly; - auto readPrefContainingObj = query; - if (auto elem = query["$queryOptions"]) { + BSONObj readPrefContainingObj = querySettings.getFullSettingsDeprecated(); + if (auto elem = readPrefContainingObj["$queryOptions"]) { // The readPreference is embedded in the $queryOptions field. 
readPrefContainingObj = elem.Obj(); } @@ -248,7 +249,7 @@ bool _isSecondaryCommand(StringData commandName, const BSONObj& commandArgs) { // Internal implementation of isSecondaryQuery, takes previously-parsed read preference bool _isSecondaryQuery(const string& ns, - const BSONObj& queryObj, + const BSONObj& filter, const ReadPreferenceSetting& readPref) { // If the read pref is primary only, this is not a secondary query if (readPref.pref == ReadPreference::PrimaryOnly) @@ -261,29 +262,12 @@ bool _isSecondaryQuery(const string& ns, // This is a command with secondary-possible read pref // Only certain commands are supported for secondary operation. - BSONObj actualQueryObj; - if (strcmp(queryObj.firstElement().fieldName(), "$query") == 0) { - actualQueryObj = queryObj["$query"].embeddedObject(); - } else if (strcmp(queryObj.firstElement().fieldName(), "query") == 0) { - actualQueryObj = queryObj["query"].embeddedObject(); - } else { - actualQueryObj = queryObj; - } - - StringData commandName = actualQueryObj.firstElementFieldName(); - return _isSecondaryCommand(commandName, actualQueryObj); + StringData commandName = filter.firstElementFieldName(); + return _isSecondaryCommand(commandName, filter); } } // namespace - -bool DBClientReplicaSet::isSecondaryQuery(const string& ns, - const BSONObj& queryObj, - int queryOptions) { - unique_ptr<ReadPreferenceSetting> readPref(_extractReadPref(queryObj, queryOptions)); - return _isSecondaryQuery(ns, queryObj, *readPref); -} - DBClientConnection* DBClientReplicaSet::checkPrimary() { ReplicaSetMonitorPtr monitor = _getMonitor(); HostAndPort h = monitor->getPrimaryOrUassert(); @@ -545,24 +529,25 @@ void DBClientReplicaSet::insert(const string& ns, } void DBClientReplicaSet::remove(const string& ns, - Query obj, + const BSONObj& filter, bool removeMany, boost::optional<BSONObj> writeConcernObj) { - checkPrimary()->remove(ns, obj, removeMany, writeConcernObj); + checkPrimary()->remove(ns, filter, removeMany, writeConcernObj); 
} unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, int queryOptions, int batchSize, boost::optional<BSONObj> readConcernObj) { - shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(query.obj, queryOptions)); + shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(querySettings, queryOptions)); invariant(nsOrUuid.nss()); const string ns = nsOrUuid.nss()->ns(); - if (_isSecondaryQuery(ns, query.obj, *readPref)) { + if (_isSecondaryQuery(ns, filter, *readPref)) { LOGV2_DEBUG(20133, 3, "dbclient_rs query using secondary or tagged node selection in {replicaSet}, " @@ -588,7 +573,8 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID } unique_ptr<DBClientCursor> cursor = conn->query(nsOrUuid, - query, + filter, + querySettings, limit, nToSkip, fieldsToReturn, @@ -620,17 +606,25 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID "dbclient_rs query to primary node", "replicaSet"_attr = _getMonitor()->getName()); - return checkPrimary()->query( - nsOrUuid, query, limit, nToSkip, fieldsToReturn, queryOptions, batchSize, readConcernObj); + return checkPrimary()->query(nsOrUuid, + filter, + querySettings, + limit, + nToSkip, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj); } BSONObj DBClientReplicaSet::findOne(const string& ns, - const Query& query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn, int queryOptions, boost::optional<BSONObj> readConcernObj) { - shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(query.obj, queryOptions)); - if (_isSecondaryQuery(ns, query.obj, *readPref)) { + shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(querySettings, queryOptions)); + if (_isSecondaryQuery(ns, filter, *readPref)) { LOGV2_DEBUG(20135, 3, "dbclient_rs findOne 
using secondary or tagged node selection in {replicaSet}, " @@ -655,7 +649,8 @@ BSONObj DBClientReplicaSet::findOne(const string& ns, break; } - return conn->findOne(ns, query, fieldsToReturn, queryOptions, readConcernObj); + return conn->findOne( + ns, filter, querySettings, fieldsToReturn, queryOptions, readConcernObj); } catch (const DBException& ex) { const Status status = ex.toStatus(str::stream() << "can't findone replica set node " << _lastSecondaryOkHost.toString()); @@ -679,7 +674,8 @@ BSONObj DBClientReplicaSet::findOne(const string& ns, "dbclient_rs findOne to primary node", "replicaSet"_attr = _getMonitor()->getName()); - return checkPrimary()->findOne(ns, query, fieldsToReturn, queryOptions, readConcernObj); + return checkPrimary()->findOne( + ns, filter, querySettings, fieldsToReturn, queryOptions, readConcernObj); } void DBClientReplicaSet::killCursor(const NamespaceString& ns, long long cursorID) { @@ -833,7 +829,8 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer DbMessage dm(toSend); QueryMessage qm(dm); - shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(qm.query, qm.queryOptions)); + shared_ptr<ReadPreferenceSetting> readPref( + _extractReadPref(Query::fromBSONDeprecated(qm.query), qm.queryOptions)); if (_isSecondaryQuery(qm.ns, qm.query, *readPref)) { LOGV2_DEBUG(20141, 3, @@ -998,7 +995,8 @@ bool DBClientReplicaSet::call(Message& toSend, QueryMessage qm(dm); ns = qm.ns; - shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(qm.query, qm.queryOptions)); + shared_ptr<ReadPreferenceSetting> readPref( + _extractReadPref(Query::fromBSONDeprecated(qm.query), qm.queryOptions)); if (_isSecondaryQuery(ns, qm.query, *readPref)) { LOGV2_DEBUG( 20145, diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h index ad2dbcbe4a9..7dd53d30448 100644 --- a/src/mongo/client/dbclient_rs.h +++ b/src/mongo/client/dbclient_rs.h @@ -91,7 +91,8 @@ public: /** throws userassertion "no primary found" 
*/ std::unique_ptr<DBClientCursor> query( const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit = 0, int nToSkip = 0, const BSONObj* fieldsToReturn = nullptr, @@ -101,7 +102,8 @@ public: /** throws userassertion "no primary found" */ BSONObj findOne(const std::string& ns, - const Query& query, + const BSONObj& filter, + const Query& querySettings, const BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, boost::optional<BSONObj> readConcernObj = boost::none) override; @@ -119,7 +121,7 @@ public: boost::optional<BSONObj> writeConcernObj = boost::none) override; void remove(const std::string& ns, - Query obj, + const BSONObj& filter, bool removeMany = true, boost::optional<BSONObj> writeConcernObj = boost::none) override; @@ -214,18 +216,6 @@ public: std::string* actualServer) override; /** - * Returns whether a query or command can be sent to secondaries based on the query object - * and options. - * - * @param ns the namespace of the query. - * @param queryObj the query object to check. - * @param queryOptions the query options - * - * @return true if the query/cmd could potentially be sent to a secondary, false otherwise - */ - static bool isSecondaryQuery(const std::string& ns, const BSONObj& queryObj, int queryOptions); - - /** * Performs a "soft reset" by clearing all states relating to secondary nodes and * returning secondary connections to the pool. */ diff --git a/src/mongo/client/dbclient_rs_test.cpp b/src/mongo/client/dbclient_rs_test.cpp index d098e1f54bf..c7ce251a154 100644 --- a/src/mongo/client/dbclient_rs_test.cpp +++ b/src/mongo/client/dbclient_rs_test.cpp @@ -151,11 +151,11 @@ TEST_F(BasicRS, QueryPrimary) { MockReplicaSet* replSet = getReplSet(); DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData()); - Query query; - query.readPref(mongo::ReadPreference::PrimaryOnly, BSONArray()); - // Note: IdentityNS contains the name of the server. 
- unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), + BSONObj{}, + Query().readPref(mongo::ReadPreference::PrimaryOnly, BSONArray())); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str()); } @@ -168,11 +168,11 @@ TEST_F(BasicRS, QuerySecondaryOnly) { MockReplicaSet* replSet = getReplSet(); DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData()); - Query query; - query.readPref(mongo::ReadPreference::SecondaryOnly, BSONArray()); - // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), + BSONObj{}, + Query().readPref(mongo::ReadPreference::SecondaryOnly, BSONArray())); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str()); } @@ -186,11 +186,11 @@ TEST_F(BasicRS, QueryPrimaryPreferred) { MockReplicaSet* replSet = getReplSet(); DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData()); - Query query; - query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); - // Note: IdentityNS contains the name of the server. 
- unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), + BSONObj{}, + Query().readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray())); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str()); } @@ -203,11 +203,11 @@ TEST_F(BasicRS, QuerySecondaryPreferred) { MockReplicaSet* replSet = getReplSet(); DBClientReplicaSet replConn(replSet->getSetName(), replSet->getHosts(), StringData()); - Query query; - query.readPref(mongo::ReadPreference::SecondaryPreferred, BSONArray()); - // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), + BSONObj{}, + Query().readPref(mongo::ReadPreference::SecondaryPreferred, BSONArray())); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str()); } @@ -271,7 +271,8 @@ TEST_F(AllNodesDown, QueryPrimary) { Query query; query.readPref(mongo::ReadPreference::PrimaryOnly, BSONArray()); - ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(AllNodesDown, CommandPrimary) { @@ -284,7 +285,8 @@ TEST_F(AllNodesDown, QuerySecondaryOnly) { Query query; query.readPref(mongo::ReadPreference::SecondaryOnly, BSONArray()); - ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(AllNodesDown, CommandSecondaryOnly) { @@ -297,7 +299,8 @@ TEST_F(AllNodesDown, QueryPrimaryPreferred) { Query query; query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); - 
ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(AllNodesDown, CommandPrimaryPreferred) { @@ -310,7 +313,8 @@ TEST_F(AllNodesDown, QuerySecondaryPreferred) { Query query; query.readPref(mongo::ReadPreference::SecondaryPreferred, BSONArray()); - ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(AllNodesDown, CommandSecondaryPreferred) { @@ -323,7 +327,8 @@ TEST_F(AllNodesDown, QueryNearest) { Query query; query.readPref(mongo::ReadPreference::Nearest, BSONArray()); - ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(AllNodesDown, CommandNearest) { @@ -369,7 +374,8 @@ TEST_F(PrimaryDown, QueryPrimary) { Query query; query.readPref(mongo::ReadPreference::PrimaryOnly, BSONArray()); - ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(PrimaryDown, CommandPrimary) { @@ -384,7 +390,8 @@ TEST_F(PrimaryDown, QuerySecondaryOnly) { query.readPref(mongo::ReadPreference::SecondaryOnly, BSONArray()); // Note: IdentityNS contains the name of the server. 
- unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str()); } @@ -402,7 +409,8 @@ TEST_F(PrimaryDown, QueryPrimaryPreferred) { query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str()); } @@ -420,7 +428,8 @@ TEST_F(PrimaryDown, QuerySecondaryPreferred) { query.readPref(mongo::ReadPreference::SecondaryPreferred, BSONArray()); // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str()); } @@ -436,7 +445,8 @@ TEST_F(PrimaryDown, Nearest) { Query query; query.readPref(mongo::ReadPreference::Nearest, BSONArray()); - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getSecondaries().front(), doc[HostField.name()].str()); } @@ -483,7 +493,8 @@ TEST_F(SecondaryDown, QueryPrimary) { query.readPref(mongo::ReadPreference::PrimaryOnly, BSONArray()); // Note: IdentityNS contains the name of the server. 
- unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str()); } @@ -498,7 +509,8 @@ TEST_F(SecondaryDown, QuerySecondaryOnly) { Query query; query.readPref(mongo::ReadPreference::SecondaryOnly, BSONArray()); - ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), query), AssertionException); + ASSERT_THROWS(replConn.query(NamespaceString(IdentityNS), BSONObj{}, query), + AssertionException); } TEST_F(SecondaryDown, CommandSecondaryOnly) { @@ -513,7 +525,8 @@ TEST_F(SecondaryDown, QueryPrimaryPreferred) { query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str()); } @@ -530,7 +543,8 @@ TEST_F(SecondaryDown, QuerySecondaryPreferred) { query.readPref(mongo::ReadPreference::SecondaryPreferred, BSONArray()); // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str()); } @@ -547,7 +561,8 @@ TEST_F(SecondaryDown, QueryNearest) { query.readPref(mongo::ReadPreference::Nearest, BSONArray()); // Note: IdentityNS contains the name of the server. 
- unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); ASSERT_EQUALS(replSet->getPrimary(), doc[HostField.name()].str()); } @@ -698,7 +713,8 @@ TEST_F(TaggedFiveMemberRS, ConnShouldPinIfSameSettings) { query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); dest = doc[HostField.name()].str(); } @@ -706,7 +722,8 @@ TEST_F(TaggedFiveMemberRS, ConnShouldPinIfSameSettings) { { Query query; query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); const string newDest = doc[HostField.name()].str(); ASSERT_EQUALS(dest, newDest); @@ -726,7 +743,8 @@ TEST_F(TaggedFiveMemberRS, ConnShouldNotPinIfHostMarkedAsFailed) { query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); // Note: IdentityNS contains the name of the server. 
- unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); dest = doc[HostField.name()].str(); } @@ -740,7 +758,8 @@ TEST_F(TaggedFiveMemberRS, ConnShouldNotPinIfHostMarkedAsFailed) { { Query query; query.readPref(mongo::ReadPreference::PrimaryPreferred, BSONArray()); - unique_ptr<DBClientCursor> cursor = replConn.query(NamespaceString(IdentityNS), query); + unique_ptr<DBClientCursor> cursor = + replConn.query(NamespaceString(IdentityNS), BSONObj{}, query); BSONObj doc = cursor->next(); const string newDest = doc[HostField.name()].str(); ASSERT_NOT_EQUALS(dest, newDest); @@ -759,7 +778,7 @@ TEST_F(TaggedFiveMemberRS, SecondaryConnReturnsSecConn) { mongo::DBClientConnection& secConn = replConn.secondaryConn(); // Note: IdentityNS contains the name of the server. - unique_ptr<DBClientCursor> cursor = secConn.query(NamespaceString(IdentityNS), Query()); + unique_ptr<DBClientCursor> cursor = secConn.query(NamespaceString(IdentityNS), BSONObj{}); BSONObj doc = cursor->next(); dest = doc[HostField.name()].str(); ASSERT_NOT_EQUALS(dest, replSet->getPrimary()); diff --git a/src/mongo/client/query.cpp b/src/mongo/client/query.cpp index 9aa28e92711..39d7e1316e2 100644 --- a/src/mongo/client/query.cpp +++ b/src/mongo/client/query.cpp @@ -43,24 +43,6 @@ const BSONField<BSONObj> Query::ReadPrefField("$readPreference"); const BSONField<string> Query::ReadPrefModeField("mode"); const BSONField<BSONArray> Query::ReadPrefTagsField("tags"); - -Query::Query(const string& json) : obj(fromjson(json)) {} - -Query::Query(const char* json) : obj(fromjson(json)) {} - -Query& Query::hint(const string& jsonKeyPatt) { - return hint(fromjson(jsonKeyPatt)); -} - -Query& Query::where(const string& jscode, BSONObj scope) { - /* use where() before sort() and hint() and explain(), else this will assert. 
*/ - verify(!isComplex()); - BSONObjBuilder b(std::move(obj)); - b.appendWhere(jscode, scope); - obj = b.obj(); - return *this; -} - void Query::makeComplex() { if (isComplex()) return; @@ -79,16 +61,6 @@ Query& Query::hint(BSONObj keyPattern) { return *this; } -Query& Query::minKey(const BSONObj& val) { - appendComplex("$min", val); - return *this; -} - -Query& Query::maxKey(const BSONObj& val) { - appendComplex("$max", val); - return *this; -} - bool Query::isComplex(const BSONObj& obj, bool* hasDollar) { if (obj.hasElement("query")) { if (hasDollar) @@ -105,6 +77,14 @@ bool Query::isComplex(const BSONObj& obj, bool* hasDollar) { return false; } +BSONObj Query::getFilter() const { + bool hasDollar; + if (!isComplex(&hasDollar)) + return obj; + + return obj.getObjectField(hasDollar ? "$query" : "query"); +} + Query& Query::readPref(ReadPreference pref, const BSONArray& tags) { appendComplex(ReadPrefField.name().c_str(), ReadPreferenceSetting(pref, TagSet(tags)).toInnerBSON()); @@ -115,40 +95,41 @@ bool Query::isComplex(bool* hasDollar) const { return isComplex(obj, hasDollar); } -bool Query::hasReadPreference(const BSONObj& queryObj) { - const bool hasReadPrefOption = queryObj["$queryOptions"].isABSONObj() && - queryObj["$queryOptions"].Obj().hasField(ReadPrefField.name()); - - bool canHaveReadPrefField = Query::isComplex(queryObj) || - // The find command has a '$readPreference' option. - queryObj.firstElementFieldName() == StringData("find"); - - return (canHaveReadPrefField && queryObj.hasField(ReadPrefField.name())) || hasReadPrefOption; +Query& Query::appendElements(BSONObj elements) { + makeComplex(); + BSONObjBuilder b(std::move(obj)); + b.appendElements(elements); + obj = b.obj(); + return *this; } -BSONObj Query::getFilter() const { - bool hasDollar; - if (!isComplex(&hasDollar)) - return obj; +Query& Query::requestResumeToken(bool enable) { + appendComplex("$_requestResumeToken", enable); + return *this; +} - return obj.getObjectField(hasDollar ? 
"$query" : "query"); +Query& Query::resumeAfter(BSONObj point) { + appendComplex("$_resumeAfter", point); + return *this; } -BSONObj Query::getSort() const { - if (!isComplex()) - return BSONObj(); - BSONObj ret = obj.getObjectField("orderby"); - if (ret.isEmpty()) - ret = obj.getObjectField("$orderby"); - return ret; + +Query& Query::maxTimeMS(long long timeout) { + appendComplex("$maxTimeMS", timeout); + return *this; } -BSONObj Query::getHint() const { - if (!isComplex()) - return BSONObj(); - return obj.getObjectField("$hint"); + +Query& Query::term(long long value) { + appendComplex("term", value); + return *this; } -string Query::toString() const { - return obj.toString(); +Query& Query::readConcern(BSONObj rc) { + appendComplex("readConcern", rc); + return *this; } +Query& Query::readOnce(bool enable) { + appendComplex("$readOnce", enable); + return *this; +} } // namespace mongo diff --git a/src/mongo/client/query.h b/src/mongo/client/query.h index 6ef2b2531c0..c4153099d65 100644 --- a/src/mongo/client/query.h +++ b/src/mongo/client/query.h @@ -33,27 +33,30 @@ #include "mongo/client/read_preference.h" #include "mongo/rpc/message.h" - namespace mongo { -/** Represents a Mongo query expression. Typically one uses the QUERY(...) macro to construct a - * Query object. - Examples: - QUERY( "age" << 33 << "school" << "UCLA" ).sort("name") - QUERY( "age" << GT << 30 << LT << 50 ) -*/ - +/** + * Represents a subset query settings, such as sort, hint, etc. and might also contain a query + * filter. The class is being evolved into statically checkable QueryOptions type that won't include + * the filter and will encompass all relevant query settings. + */ class Query { public: static const BSONField<BSONObj> ReadPrefField; static const BSONField<std::string> ReadPrefModeField; static const BSONField<BSONArray> ReadPrefTagsField; - BSONObj obj; + /** + * Creating a Query object from raw BSON is on its way out. Please don't add new callers under + * any circumstances. 
+ */ + static Query fromBSONDeprecated(const BSONObj& b) { + Query q; + q.obj = b; + return q; + } + Query() : obj(BSONObj()) {} - Query(const BSONObj& b) : obj(b) {} - Query(const std::string& json); - Query(const char* json); /** Add a sort (ORDER BY) criteria to the query expression. @param sortPattern the sort order template. For example to order by name ascending, time @@ -82,37 +85,6 @@ public: hint("{ts:1}") */ Query& hint(BSONObj keyPattern); - Query& hint(const std::string& jsonKeyPatt); - - /** Provide min and/or max index limits for the query. - min <= x < max - */ - Query& minKey(const BSONObj& val); - /** - max is exclusive - */ - Query& maxKey(const BSONObj& val); - - /** Queries to the Mongo database support a $where parameter option which contains - a javascript function that is evaluated to see whether objects being queried match - its criteria. Use this helper to append such a function to a query object. - Your query may also contain other traditional Mongo query terms. - - @param jscode The javascript function to evaluate against each potential object - match. The function must return true for matched objects. Use the this - variable to inspect the current object. - @param scope SavedContext for the javascript object. List in a BSON object any - variables you would like defined when the jscode executes. One can think - of these as "bind variables". - - Examples: - conn.findOne("test.coll", Query("{a:3}").where("this.b == 2 || this.c == 3")); - Query badBalance = Query().where("this.debits - this.credits < 0"); - */ - Query& where(const std::string& jscode, BSONObj scope); - Query& where(const std::string& jscode) { - return where(jscode, BSONObj()); - } /** * Sets the read preference for this query. 
@@ -122,27 +94,39 @@ public: */ Query& readPref(ReadPreference pref, const BSONArray& tags); - /** - * @return true if this query has an orderby, hint, or some other field - */ - bool isComplex(bool* hasDollar = nullptr) const; - static bool isComplex(const BSONObj& obj, bool* hasDollar = nullptr); - BSONObj getFilter() const; - BSONObj getSort() const; - BSONObj getHint() const; /** - * @return true if the query object contains a read preference specification object. + * A temporary accessor that returns a reference to the internal BSON object. No new callers + * should be introduced! + * NB: must be implemented in the header because db/query/query_request cannot link against + * client/client_query. */ - static bool hasReadPreference(const BSONObj& queryObj); - - std::string toString() const; - operator std::string() const { - return toString(); + const BSONObj& getFullSettingsDeprecated() const { + return obj; } + /** + * The setters below were added to make the contents of the Query's settings internal BSON + * explicit. They will be reviewed and deprecated/removed as appropriate. + */ + Query& appendElements(BSONObj elements); + Query& requestResumeToken(bool enable); + Query& resumeAfter(BSONObj point); + Query& maxTimeMS(long long timeout); + Query& term(long long value); + Query& readConcern(BSONObj rc); + Query& readOnce(bool enable); + private: + BSONObj obj; + + /** + * @return true if this query has an orderby, hint, or some other field + */ + bool isComplex(bool* hasDollar = nullptr) const; + static bool isComplex(const BSONObj& obj, bool* hasDollar = nullptr); + void makeComplex(); template <class T> void appendComplex(const char* fieldName, const T& val) { @@ -154,12 +138,7 @@ private: }; inline std::ostream& operator<<(std::ostream& s, const Query& q) { - return s << q.toString(); + return s << q.getFullSettingsDeprecated().toString(); } -/** Typically one uses the QUERY(...) macro to construct a Query object. 
-Example: QUERY( "age" << 33 << "school" << "UCLA" ) -*/ -#define QUERY(x) ::mongo::Query(BSON(x)) - } // namespace mongo diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp index 66f8e74215d..e8dc4cdda7a 100644 --- a/src/mongo/db/auth/authz_manager_external_state_d.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp @@ -60,12 +60,12 @@ AuthzManagerExternalStateMongod::makeAuthzSessionExternalState(AuthorizationMana Status AuthzManagerExternalStateMongod::query( OperationContext* opCtx, const NamespaceString& collectionName, - const BSONObj& query, + const BSONObj& filter, const BSONObj& projection, const std::function<void(const BSONObj&)>& resultProcessor) { try { DBDirectClient client(opCtx); - client.query(resultProcessor, collectionName, query, &projection); + client.query(resultProcessor, collectionName, filter, Query(), &projection); return Status::OK(); } catch (const DBException& e) { return e.toStatus(); diff --git a/src/mongo/db/auth/authz_manager_external_state_d.h b/src/mongo/db/auth/authz_manager_external_state_d.h index d1fedc3be8a..62bd7a08fce 100644 --- a/src/mongo/db/auth/authz_manager_external_state_d.h +++ b/src/mongo/db/auth/authz_manager_external_state_d.h @@ -62,7 +62,7 @@ public: const BSONObj& query) final; Status query(OperationContext* opCtx, const NamespaceString& collectionName, - const BSONObj& query, + const BSONObj& filter, const BSONObj& projection, const std::function<void(const BSONObj&)>& resultProcessor) final; }; diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h index 31fd864684e..4f2807b6b57 100644 --- a/src/mongo/db/auth/authz_manager_external_state_local.h +++ b/src/mongo/db/auth/authz_manager_external_state_local.h @@ -101,12 +101,12 @@ public: const BSONObj& query) = 0; /** - * Finds all documents matching "query" in "collectionName". 
For each document returned, + * Finds all documents matching "filter" in "collectionName". For each document returned, * calls the function resultProcessor on it. */ virtual Status query(OperationContext* opCtx, const NamespaceString& collectionName, - const BSONObj& query, + const BSONObj& filter, const BSONObj& projection, const std::function<void(const BSONObj&)>& resultProcessor) = 0; diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index e0ff1859e51..41d4ae540b3 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -226,21 +226,20 @@ struct Cloner::Fun { time_t saveLast; }; -/* copy the specified collection +/** + * Copy the specified collection. */ void Cloner::_copy(OperationContext* opCtx, const std::string& toDBName, const NamespaceString& nss, const BSONObj& from_opts, const BSONObj& from_id_index, - Query query, DBClientBase* conn) { LOGV2_DEBUG(20414, 2, - "\t\tcloning collection with filter", + "\t\tcloning collection", "ns"_attr = nss, - "conn_getServerAddress"_attr = conn->getServerAddress(), - "query"_attr = redact(query.toString())); + "conn_getServerAddress"_attr = conn->getServerAddress()); Fun f(opCtx, toDBName); f.numSeen = 0; @@ -254,7 +253,8 @@ void Cloner::_copy(OperationContext* opCtx, Lock::TempRelease tempRelease(opCtx->lockState()); conn->query(std::function<void(DBClientCursorBatchIterator&)>(f), nss, - query, + BSONObj{} /* filter */, + Query() /* querySettings */, nullptr, options, 0 /* batchSize */, @@ -262,8 +262,7 @@ void Cloner::_copy(OperationContext* opCtx, } uassert(ErrorCodes::PrimarySteppedDown, - str::stream() << "Not primary while cloning collection " << nss.ns() << " with filter " - << query.toString(), + str::stream() << "Not primary while cloning collection " << nss.ns(), !opCtx->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); } @@ -582,7 +581,6 @@ Status Cloner::copyDb(OperationContext* opCtx, nss, params.collectionInfo["options"].Obj(), 
params.idIndexSpec, - Query(), conn.get()); } diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h index 0760355a3ff..8d1d512fe1f 100644 --- a/src/mongo/db/cloner.h +++ b/src/mongo/db/cloner.h @@ -95,7 +95,6 @@ private: const NamespaceString& nss, const BSONObj& from_opts, const BSONObj& from_id_index, - Query q, DBClientBase* conn); void _copyIndexes(OperationContext* opCtx, diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index e7a6b76d2cf..67a1549b4fc 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -410,9 +410,8 @@ public: const BSONObj& query, const BSONObj& sort) { DBDirectClient client(opCtx); - Query q(query); - q.sort(sort); - std::unique_ptr<DBClientCursor> c = client.query(NamespaceString(ns), q); + std::unique_ptr<DBClientCursor> c = + client.query(NamespaceString(ns), query, Query().sort(sort)); while (c->more()) { LOGV2(20454, "Chunk: {chunk}", "Dumping chunks", "chunk"_attr = c->nextSafe()); } diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp index 2f6d761854e..d4da340dc5b 100644 --- a/src/mongo/db/commands/user_management_commands.cpp +++ b/src/mongo/db/commands/user_management_commands.cpp @@ -220,7 +220,7 @@ Status queryAuthzDocument(OperationContext* opCtx, const std::function<void(const BSONObj&)>& resultProcessor) { try { DBDirectClient client(opCtx); - client.query(resultProcessor, collectionName, query, &projection); + client.query(resultProcessor, collectionName, query, Query(), &projection); return Status::OK(); } catch (const DBException& e) { return e.toStatus(); diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp index f2e0b3bd629..33edc482347 100644 --- a/src/mongo/db/dbdirectclient.cpp +++ b/src/mongo/db/dbdirectclient.cpp @@ -153,7 +153,8 @@ void DBDirectClient::say(Message& toSend, bool isRetry, string* actualServer) { } 
unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, @@ -162,7 +163,7 @@ unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& ns boost::optional<BSONObj> readConcernObj) { invariant(!readConcernObj, "passing readConcern to DBDirectClient functions is not supported"); return DBClientBase::query( - nsOrUuid, query, limit, nToSkip, fieldsToReturn, queryOptions, batchSize); + nsOrUuid, filter, querySettings, limit, nToSkip, fieldsToReturn, queryOptions, batchSize); } write_ops::FindAndModifyCommandReply DBDirectClient::findAndModify( diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h index 74a09f8cef3..94eff680c4a 100644 --- a/src/mongo/db/dbdirectclient.h +++ b/src/mongo/db/dbdirectclient.h @@ -56,7 +56,8 @@ public: std::unique_ptr<DBClientCursor> query( const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings = Query(), int limit = 0, int nToSkip = 0, const BSONObj* fieldsToReturn = nullptr, diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp index e8a167ccdcc..897cdc62e2b 100644 --- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp +++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp @@ -142,38 +142,41 @@ auto startExhaustQuery( Milliseconds awaitDataTimeoutMS = Milliseconds(5000), const boost::optional<repl::OpTime>& lastKnownCommittedOpTime = boost::none) { queryOptions = queryOptions | QueryOption_Exhaust; - auto queryThread = stdx::async( - stdx::launch::async, - [&queryCursor, - queryConnection, - queryOptions, - awaitDataTimeoutMS, - lastKnownCommittedOpTime] { - const auto projSpec = BSON("_id" << 0 << "a" << 1); - // Issue the initial 'find' with a batchSize of 2 and the exhaust flag set. 
We then - // iterate through the first batch and confirm that the results are as expected. - queryCursor = queryConnection->query(testNSS, {}, 0, 0, &projSpec, queryOptions, 2); - for (int i = 0; i < 2; ++i) { - ASSERT_BSONOBJ_EQ(queryCursor->nextSafe(), BSON("a" << i)); - } - // Having exhausted the two results returned by the initial find, we set the batchSize - // to 1 and issue a single getMore via DBClientCursor::more(). Because the 'exhaust' - // flag is set, the server will generate a series of internal getMores and stream them - // back to the client until the cursor is exhausted, without the client sending any - // further getMore requests. We expect this request to hang at the - // 'waitWithPinnedCursorDuringGetMoreBatch' failpoint. - queryCursor->setBatchSize(1); - if ((queryOptions & QueryOption_CursorTailable) && - (queryOptions & QueryOption_AwaitData)) { - queryCursor->setAwaitDataTimeoutMS(awaitDataTimeoutMS); - if (lastKnownCommittedOpTime) { - auto term = lastKnownCommittedOpTime.get().getTerm(); - queryCursor->setCurrentTermAndLastCommittedOpTime(term, - lastKnownCommittedOpTime); - } - } - ASSERT(queryCursor->more()); - }); + auto queryThread = + stdx::async(stdx::launch::async, + [&queryCursor, + queryConnection, + queryOptions, + awaitDataTimeoutMS, + lastKnownCommittedOpTime] { + const auto projSpec = BSON("_id" << 0 << "a" << 1); + // Issue the initial 'find' with a batchSize of 2 and the exhaust flag set. + // We then iterate through the first batch and confirm that the results are + // as expected. + queryCursor = queryConnection->query( + testNSS, BSONObj{}, Query(), 0, 0, &projSpec, queryOptions, 2); + for (int i = 0; i < 2; ++i) { + ASSERT_BSONOBJ_EQ(queryCursor->nextSafe(), BSON("a" << i)); + } + // Having exhausted the two results returned by the initial find, we set the + // batchSize to 1 and issue a single getMore via DBClientCursor::more(). 
+ // Because the 'exhaust' flag is set, the server will generate a series of + // internal getMores and stream them back to the client until the cursor is + // exhausted, without the client sending any further getMore requests. We + // expect this request to hang at the + // 'waitWithPinnedCursorDuringGetMoreBatch' failpoint. + queryCursor->setBatchSize(1); + if ((queryOptions & QueryOption_CursorTailable) && + (queryOptions & QueryOption_AwaitData)) { + queryCursor->setAwaitDataTimeoutMS(awaitDataTimeoutMS); + if (lastKnownCommittedOpTime) { + auto term = lastKnownCommittedOpTime.get().getTerm(); + queryCursor->setCurrentTermAndLastCommittedOpTime( + term, lastKnownCommittedOpTime); + } + } + ASSERT(queryCursor->more()); + }); // Wait until the parallel operation initializes its cursor. const auto startTime = clock->now(); diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp index c197c936cc5..4bbe23b0a66 100644 --- a/src/mongo/db/op_observer_impl_test.cpp +++ b/src/mongo/db/op_observer_impl_test.cpp @@ -785,7 +785,7 @@ protected: boost::optional<DurableTxnStateEnum> txnState) { DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << session()->getSessionId().toBSON())}); + BSON("_id" << session()->getSessionId().toBSON())); ASSERT(cursor); ASSERT(cursor->more()); @@ -811,7 +811,7 @@ protected: void assertNoTxnRecord() { DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << session()->getSessionId().toBSON())}); + BSON("_id" << session()->getSessionId().toBSON())); ASSERT(cursor); ASSERT(!cursor->more()); } @@ -819,7 +819,7 @@ protected: void assertTxnRecordStartOpTime(boost::optional<repl::OpTime> startOpTime) { DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << session()->getSessionId().toBSON())}); + 
BSON("_id" << session()->getSessionId().toBSON())); ASSERT(cursor); ASSERT(cursor->more()); diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp index a09d98495ee..65fffa768d9 100644 --- a/src/mongo/db/ops/write_ops_retryability.cpp +++ b/src/mongo/db/ops/write_ops_retryability.cpp @@ -118,8 +118,8 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o LogicalSessionId sessionId = oplog.getSessionId().get(); TxnNumber txnNumber = oplog.getTxnNumber().get(); Timestamp ts = oplog.getTimestamp(); - const auto query = BSON("_id" << sessionId.toBSON()); - BSONObj imageDoc = client.findOne(NamespaceString::kConfigImagesNamespace.ns(), query); + BSONObj imageDoc = client.findOne(NamespaceString::kConfigImagesNamespace.ns(), + BSON("_id" << sessionId.toBSON()) /*filter*/); if (imageDoc.isEmpty()) { LOGV2_WARNING(5676402, "Image lookup for a retryable findAndModify was not found", @@ -170,9 +170,8 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o auto opTime = oplog.getPreImageOpTime() ? oplog.getPreImageOpTime().value() : oplog.getPostImageOpTime().value(); - auto oplogDoc = - client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), nullptr); + client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), Query(), nullptr); uassert(40613, str::stream() << "oplog no longer contains the complete write history of this " diff --git a/src/mongo/db/persistent_task_store.h b/src/mongo/db/persistent_task_store.h index cef18c359c4..d5f3d7a2c16 100644 --- a/src/mongo/db/persistent_task_store.h +++ b/src/mongo/db/persistent_task_store.h @@ -83,10 +83,10 @@ public: * multiple documents match, at most one document will be updated. 
*/ void update(OperationContext* opCtx, - Query query, + const BSONObj& filter, const BSONObj& update, const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) { - _update(opCtx, std::move(query), update, /* upsert */ false, writeConcern); + _update(opCtx, filter, update, /* upsert */ false, writeConcern); } /** @@ -94,17 +94,17 @@ public: * multiple documents match, at most one document will be updated. */ void upsert(OperationContext* opCtx, - Query query, + const BSONObj& filter, const BSONObj& update, const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) { - _update(opCtx, std::move(query), update, /* upsert */ true, writeConcern); + _update(opCtx, filter, update, /* upsert */ true, writeConcern); } /** * Removes all documents which match the given query. */ void remove(OperationContext* opCtx, - Query query, + const BSONObj& filter, const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) { DBDirectClient dbClient(opCtx); @@ -114,7 +114,7 @@ public: deleteOp.setDeletes({[&] { write_ops::DeleteOpEntry entry; - entry.setQ(query.obj); + entry.setQ(filter); entry.setMulti(true); return entry; @@ -136,10 +136,12 @@ public: * Iteration can be stopped early if the callback returns false indicating that it doesn't want * to continue. */ - void forEach(OperationContext* opCtx, Query query, std::function<bool(const T&)> handler) { + void forEach(OperationContext* opCtx, + const BSONObj& filter, + std::function<bool(const T&)> handler) { DBDirectClient dbClient(opCtx); - auto cursor = dbClient.query(_storageNss, query); + auto cursor = dbClient.query(_storageNss, filter); while (cursor->more()) { auto bson = cursor->next(); @@ -154,18 +156,18 @@ public: /** * Returns the number of documents in the store matching the given query. 
*/ - size_t count(OperationContext* opCtx, Query query = Query()) { + size_t count(OperationContext* opCtx, const BSONObj& filter = BSONObj{}) { DBDirectClient client(opCtx); auto projection = BSON("_id" << 1); - auto cursor = client.query(_storageNss, query, 0, 0, &projection); + auto cursor = client.query(_storageNss, filter, Query(), 0, 0, &projection); return cursor->itcount(); } private: void _update(OperationContext* opCtx, - Query query, + const BSONObj& filter, const BSONObj& update, bool upsert, const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) { @@ -174,7 +176,7 @@ private: auto commandResponse = dbClient.runCommand([&] { write_ops::UpdateCommandRequest updateOp(_storageNss); auto updateModification = write_ops::UpdateModification::parseFromClassicUpdate(update); - write_ops::UpdateOpEntry updateEntry(query.obj, updateModification); + write_ops::UpdateOpEntry updateEntry(filter, updateModification); updateEntry.setMulti(false); updateEntry.setUpsert(upsert); updateOp.setUpdates({updateEntry}); @@ -187,7 +189,7 @@ private: uassert(ErrorCodes::NoMatchingDocument, "No matching document found for query {} on namespace {}"_format( - query.toString(), _storageNss.toString()), + filter.toString(), _storageNss.toString()), upsert || commandReply.getIntField("n") > 0); WriteConcernResult ignoreResult; diff --git a/src/mongo/db/persistent_task_store_test.cpp b/src/mongo/db/persistent_task_store_test.cpp index a8167ac9dbb..dd995a3eda0 100644 --- a/src/mongo/db/persistent_task_store_test.cpp +++ b/src/mongo/db/persistent_task_store_test.cpp @@ -105,8 +105,8 @@ TEST_F(PersistentTaskStoreTest, TestForEach) { // No match. int count = 0; store.forEach(opCtx, - QUERY("key" - << "four"), + BSON("key" + << "four"), [&count](const TestTask& t) { ++count; return true; @@ -115,7 +115,7 @@ TEST_F(PersistentTaskStoreTest, TestForEach) { // Multiple matches. 
count = 0; - store.forEach(opCtx, QUERY("min" << GTE << 10), [&count](const TestTask& t) { + store.forEach(opCtx, BSON("min" << GTE << 10), [&count](const TestTask& t) { ++count; return true; }); @@ -123,7 +123,7 @@ TEST_F(PersistentTaskStoreTest, TestForEach) { // Multiple matches, only take one. count = 0; - store.forEach(opCtx, QUERY("min" << GTE << 10), [&count](const TestTask& t) { + store.forEach(opCtx, BSON("min" << GTE << 10), [&count](const TestTask& t) { ++count; return count < 1; }); @@ -132,8 +132,8 @@ TEST_F(PersistentTaskStoreTest, TestForEach) { // Single match. count = 0; store.forEach(opCtx, - QUERY("key" - << "one"), + BSON("key" + << "one"), [&count](const TestTask& t) { ++count; return true; @@ -153,8 +153,8 @@ TEST_F(PersistentTaskStoreTest, TestRemove) { ASSERT_EQ(store.count(opCtx), 3); store.remove(opCtx, - QUERY("key" - << "one")); + BSON("key" + << "one")); ASSERT_EQ(store.count(opCtx), 2); } @@ -171,7 +171,7 @@ TEST_F(PersistentTaskStoreTest, TestRemoveMultiple) { ASSERT_EQ(store.count(opCtx), 3); // Remove multipe overlapping ranges. - store.remove(opCtx, QUERY("min" << GTE << 10)); + store.remove(opCtx, BSON("min" << GTE << 10)); ASSERT_EQ(store.count(opCtx), 1); } @@ -189,13 +189,13 @@ TEST_F(PersistentTaskStoreTest, TestUpdate) { ASSERT_EQ(store.count(opCtx), 3); store.update(opCtx, - QUERY("key" - << "one"), + BSON("key" + << "one"), BSON("$inc" << BSON("min" << 1))); store.forEach(opCtx, - QUERY("key" - << "one"), + BSON("key" + << "one"), [&](const TestTask& task) { ASSERT_EQ(task.min, expectedUpdatedMin); return false; @@ -214,9 +214,9 @@ TEST_F(PersistentTaskStoreTest, TestUpdateOnlyUpdatesOneMatchingDocument) { store.add(opCtx, TestTask{"three", 40, 50}); // Update query will match two documents but should only update one of them. 
- store.update(opCtx, QUERY("key" << keyToMatch), BSON("$inc" << BSON("min" << 1))); + store.update(opCtx, BSON("key" << keyToMatch), BSON("$inc" << BSON("min" << 1))); - ASSERT_EQ(store.count(opCtx, QUERY("key" << keyToMatch << "min" << expectedUpdatedMin)), 1); + ASSERT_EQ(store.count(opCtx, BSON("key" << keyToMatch << "min" << expectedUpdatedMin)), 1); } TEST_F(PersistentTaskStoreTest, TestUpsert) { @@ -225,7 +225,7 @@ TEST_F(PersistentTaskStoreTest, TestUpsert) { PersistentTaskStore<TestTask> store(kNss); std::string keyToMatch = "foo"; - auto query = QUERY("key" << keyToMatch); + auto query = BSON("key" << keyToMatch); TestTask task(keyToMatch, 0, 0); BSONObj taskBson = task.toBSON(); @@ -281,15 +281,15 @@ TEST_F(PersistentTaskStoreTest, TestWritesPersistAcrossInstances) { PersistentTaskStore<TestTask> store(kNss); ASSERT_EQ(store.count(opCtx), 3); - auto count = store.count(opCtx, QUERY("min" << GTE << 10)); + auto count = store.count(opCtx, BSON("min" << GTE << 10)); ASSERT_EQ(count, 2); store.remove(opCtx, - QUERY("key" - << "two")); + BSON("key" + << "two")); ASSERT_EQ(store.count(opCtx), 2); - count = store.count(opCtx, QUERY("min" << GTE << 10)); + count = store.count(opCtx, BSON("min" << GTE << 10)); ASSERT_EQ(count, 1); } @@ -297,7 +297,7 @@ TEST_F(PersistentTaskStoreTest, TestWritesPersistAcrossInstances) { PersistentTaskStore<TestTask> store(kNss); ASSERT_EQ(store.count(opCtx), 2); - auto count = store.count(opCtx, QUERY("min" << GTE << 10)); + auto count = store.count(opCtx, BSON("min" << GTE << 10)); ASSERT_EQ(count, 1); } } @@ -312,16 +312,16 @@ TEST_F(PersistentTaskStoreTest, TestCountWithQuery) { store.add(opCtx, TestTask{"two", 40, 50}); ASSERT_EQ(store.count(opCtx, - QUERY("key" - << "two")), + BSON("key" + << "two")), 2); // Remove multipe overlapping ranges. 
- store.remove(opCtx, QUERY("min" << 10)); + store.remove(opCtx, BSON("min" << 10)); ASSERT_EQ(store.count(opCtx, - QUERY("key" - << "two")), + BSON("key" + << "two")), 1); } diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp index 10320583a71..90d6e386d56 100644 --- a/src/mongo/db/query/query_request_helper.cpp +++ b/src/mongo/db/query/query_request_helper.cpp @@ -36,6 +36,7 @@ #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/query.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/dbmessage.h" @@ -139,10 +140,7 @@ Status initFullQuery(const BSONObj& top, FindCommandRequest* findCommand) { } } else if (name.startsWith("$")) { name = name.substr(1); // chop first char - if (name == "explain") { - return Status(ErrorCodes::Error(5856600), - "the $explain OP_QUERY flag is no longer supported"); - } else if (name == "min") { + if (name == "min") { if (!e.isABSONObj()) { return Status(ErrorCodes::BadValue, "$min must be a BSONObj"); } @@ -187,7 +185,8 @@ Status initFullQuery(const BSONObj& top, FindCommandRequest* findCommand) { Status initFindCommandRequest(int ntoskip, int queryOptions, - const BSONObj& queryObj, + const BSONObj& filter, + const Query& querySettings, const BSONObj& proj, FindCommandRequest* findCommand) { if (!proj.isEmpty()) { @@ -200,19 +199,12 @@ Status initFindCommandRequest(int ntoskip, // Initialize flags passed as 'queryOptions' bit vector. 
initFromInt(queryOptions, findCommand); - BSONElement queryField = queryObj["query"]; - if (!queryField.isABSONObj()) { - queryField = queryObj["$query"]; - } - if (queryField.isABSONObj()) { - findCommand->setFilter(queryField.embeddedObject().getOwned()); - Status status = initFullQuery(queryObj, findCommand); - if (!status.isOK()) { - return status; - } - } else { - findCommand->setFilter(queryObj.getOwned()); + findCommand->setFilter(filter.getOwned()); + Status status = initFullQuery(querySettings.getFullSettingsDeprecated(), findCommand); + if (!status.isOK()) { + return status; } + // It's not possible to specify readConcern in a legacy query message, so initialize it to // an empty readConcern object, ie. equivalent to `readConcern: {}`. This ensures that // mongos passes this empty readConcern to shards. @@ -393,14 +385,15 @@ void validateCursorResponse(const BSONObj& outputAsBson) { // StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nssOrUuid, - const BSONObj& queryObj, + const BSONObj& filter, + const Query& querySettings, const BSONObj& proj, int ntoskip, int queryOptions) { auto findCommand = std::make_unique<FindCommandRequest>(std::move(nssOrUuid)); - Status status = - initFindCommandRequest(ntoskip, queryOptions, queryObj, proj, findCommand.get()); + Status status = initFindCommandRequest( + ntoskip, queryOptions, filter, querySettings, proj, findCommand.get()); if (!status.isOK()) { return status; } diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h index c925c06dd38..3c7cbc53b89 100644 --- a/src/mongo/db/query/query_request_helper.h +++ b/src/mongo/db/query/query_request_helper.h @@ -42,6 +42,7 @@ namespace mongo { class QueryMessage; class Status; +class Query; template <typename T> class StatusWith; @@ -151,7 +152,8 @@ void validateCursorResponse(const BSONObj& outputAsBson); * Parse the provided legacy query object and parameters to construct a 
FindCommandRequest. */ StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nsOrUuid, - const BSONObj& queryObj, + const BSONObj& filter, + const Query& querySettings, const BSONObj& proj, int ntoskip, int queryOptions); diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp index 6eeb8704f26..eba29f8c27e 100644 --- a/src/mongo/db/query/query_request_test.cpp +++ b/src/mongo/db/query/query_request_test.cpp @@ -1551,24 +1551,20 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) { TEST(QueryRequestTest, ParseFromLegacyQuery) { const auto kSkip = 1; const NamespaceString nss("test.testns"); - BSONObj queryObj = fromjson(R"({ - query: {query: 1}, - orderby: {sort: 1}, - $hint: {hint: 1}, - $min: {x: 'min'}, - $max: {x: 'max'} - })"); unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery( - nss, queryObj, BSON("proj" << 1), kSkip, QueryOption_Exhaust))); + nss, + fromjson("{query: 1}") /*filter*/, + Query().sort(BSON("sort" << 1)).hint(BSON("hint" << 1)), + BSON("proj" << 1), + kSkip, + QueryOption_Exhaust))); ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss); ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{query: 1}")); ASSERT_BSONOBJ_EQ(findCommand->getProjection(), fromjson("{proj: 1}")); ASSERT_BSONOBJ_EQ(findCommand->getSort(), fromjson("{sort: 1}")); ASSERT_BSONOBJ_EQ(findCommand->getHint(), fromjson("{hint: 1}")); - ASSERT_BSONOBJ_EQ(findCommand->getMin(), fromjson("{x: 'min'}")); - ASSERT_BSONOBJ_EQ(findCommand->getMax(), fromjson("{x: 'max'}")); ASSERT_EQ(findCommand->getSkip(), boost::optional<int64_t>(kSkip)); ASSERT_FALSE(findCommand->getNtoreturn()); ASSERT_EQ(findCommand->getSingleBatch(), false); @@ -1579,15 +1575,14 @@ TEST(QueryRequestTest, ParseFromLegacyQuery) { TEST(QueryRequestTest, ParseFromLegacyQueryOplogReplayFlagAllowed) { const NamespaceString nss("test.testns"); - auto queryObj = fromjson("{query: 
{query: 1}, orderby: {sort: 1}}"); const BSONObj projectionObj{}; const auto nToSkip = 0; // Test that parsing succeeds even if the oplog replay bit is set in the OP_QUERY message. This // flag may be set by old clients. auto options = QueryOption_OplogReplay_DEPRECATED; - unique_ptr<FindCommandRequest> findCommand(assertGet( - query_request_helper::fromLegacyQuery(nss, queryObj, projectionObj, nToSkip, options))); + unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery( + nss, fromjson("{query: 1}"), Query().sort("sort", 1), projectionObj, nToSkip, options))); // Verify that if we reserialize the find command, the 'oplogReplay' field // does not appear. @@ -1603,12 +1598,9 @@ TEST(QueryRequestTest, ParseFromLegacyQueryOplogReplayFlagAllowed) { } TEST(QueryRequestTest, ParseFromLegacyQueryUnwrapped) { - BSONObj queryObj = fromjson(R"({ - foo: 1 - })"); const NamespaceString nss("test.testns"); - unique_ptr<FindCommandRequest> findCommand(assertGet( - query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, QueryOption_Exhaust))); + unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery( + nss, fromjson("{foo: 1}"), Query(), BSONObj(), 0, QueryOption_Exhaust))); ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss); ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{foo: 1}")); @@ -1628,20 +1620,6 @@ TEST(QueryRequestHelperTest, ValidateResponseWrongDataType) { ErrorCodes::TypeMismatch); } -TEST(QueryRequestTest, ParseFromLegacyQueryExplainError) { - BSONObj queryObj = fromjson(R"({ - query: {query: 1}, - $explain: false - })"); - - const NamespaceString nss("test.testns"); - ASSERT_EQUALS( - query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, QueryOption_Exhaust) - .getStatus() - .code(), - static_cast<ErrorCodes::Error>(5856600)); -} - class QueryRequestTest : public ServiceContextTest {}; TEST_F(QueryRequestTest, ParseFromUUID) { diff --git 
a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp index 72ed4b8e9b7..1b341ab4b11 100644 --- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp +++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp @@ -50,7 +50,7 @@ BSONObj getPersistedDefaultRWConcernDocument(OperationContext* opCtx) { DBDirectClient client(opCtx); return client.findOne(NamespaceString::kConfigSettingsNamespace.toString(), - QUERY("_id" << ReadWriteConcernDefaults::kPersistedDocumentId)); + BSON("_id" << ReadWriteConcernDefaults::kPersistedDocumentId)); } } // namespace diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp index 4987ef3e937..8a7159b5dde 100644 --- a/src/mongo/db/repl/all_database_cloner.cpp +++ b/src/mongo/db/repl/all_database_cloner.cpp @@ -132,7 +132,7 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::getInitialSyncIdStage() { if (wireVersion < WireVersion::RESUMABLE_INITIAL_SYNC) return kContinueNormally; auto initialSyncId = getClient()->findOne( - ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), Query()); + ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), BSONObj{}); uassert(ErrorCodes::InitialSyncFailure, "Cannot retrieve sync source initial sync ID", !initialSyncId.isEmpty()); diff --git a/src/mongo/db/repl/all_database_cloner_test.cpp b/src/mongo/db/repl/all_database_cloner_test.cpp index 3204c419a12..b77caa19e47 100644 --- a/src/mongo/db/repl/all_database_cloner_test.cpp +++ b/src/mongo/db/repl/all_database_cloner_test.cpp @@ -384,7 +384,8 @@ TEST_F(AllDatabaseClonerTest, RetriesListDatabasesButInitialSyncIdChanges) { // Clear and change the initial sync ID _mockServer->remove( - ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), Query()); + ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), + 
BSONObj{} /*filter*/); _mockServer->insert( ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), BSON("_id" << UUID::gen())); diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp index e8a89e190ce..41b8c8fd858 100644 --- a/src/mongo/db/repl/apply_ops.cpp +++ b/src/mongo/db/repl/apply_ops.cpp @@ -314,7 +314,9 @@ Status _checkPrecondition(OperationContext* opCtx, } DBDirectClient db(opCtx); - BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj()); + // The preconditions come in "q: {{query: {...}, orderby: ..., etc.}}" format. + auto preconditionQuery = Query::fromBSONDeprecated(preCondition["q"].Obj()); + BSONObj realres = db.findOne(nss.ns(), preconditionQuery.getFilter(), preconditionQuery); // Get collection default collation. auto databaseHolder = DatabaseHolder::get(opCtx); diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp index 35b1ddf6d7b..191f1331101 100644 --- a/src/mongo/db/repl/collection_cloner.cpp +++ b/src/mongo/db/repl/collection_cloner.cpp @@ -308,12 +308,11 @@ void CollectionCloner::runQuery() { if (_resumeToken) { // Resume the query from where we left off. LOGV2_DEBUG(21133, 1, "Collection cloner will resume the last successful query"); - query = QUERY("query" << BSONObj() << "$_requestResumeToken" << true << "$_resumeAfter" - << _resumeToken.get()); + query.requestResumeToken(true).resumeAfter(_resumeToken.get()); } else { // New attempt at a resumable query. 
LOGV2_DEBUG(21134, 1, "Collection cloner will run a new query"); - query = QUERY("query" << BSONObj() << "$_requestResumeToken" << true); + query.requestResumeToken(true); } query.hint(BSON("$natural" << 1)); } @@ -326,6 +325,7 @@ void CollectionCloner::runQuery() { try { getClient()->query([this](DBClientCursorBatchIterator& iter) { handleNextBatch(iter); }, _sourceDbAndUuid, + BSONObj{}, query, nullptr /* fieldsToReturn */, QueryOption_NoCursorTimeout | QueryOption_SecondaryOk | diff --git a/src/mongo/db/repl/initial_sync_base_cloner.cpp b/src/mongo/db/repl/initial_sync_base_cloner.cpp index 314ef926557..0c00012b3f7 100644 --- a/src/mongo/db/repl/initial_sync_base_cloner.cpp +++ b/src/mongo/db/repl/initial_sync_base_cloner.cpp @@ -128,7 +128,8 @@ Status InitialSyncBaseCloner::checkInitialSyncIdIsUnchanged() { BSONObj initialSyncId; try { initialSyncId = getClient()->findOne( - ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), Query()); + ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), + BSONObj{}); } catch (DBException& e) { if (ErrorCodes::isRetriableError(e)) { auto status = e.toStatus().withContext( diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index 4d50cb7e241..d3f0653a744 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -1967,8 +1967,11 @@ void setNewTimestamp(ServiceContext* service, const Timestamp& newTime) { void initTimestampFromOplog(OperationContext* opCtx, const NamespaceString& oplogNss) { DBDirectClient c(opCtx); static const BSONObj reverseNaturalObj = BSON("$natural" << -1); - BSONObj lastOp = - c.findOne(oplogNss.ns(), Query().sort(reverseNaturalObj), nullptr, QueryOption_SecondaryOk); + BSONObj lastOp = c.findOne(oplogNss.ns(), + BSONObj{}, + Query().sort(reverseNaturalObj), + nullptr, + QueryOption_SecondaryOk); if (!lastOp.isEmpty()) { LOGV2_DEBUG(21256, 1, "replSet setting last Timestamp"); diff --git 
a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp index cda89640fdf..9a2bc34390f 100644 --- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp +++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp @@ -350,7 +350,7 @@ void checkTxnTable(OperationContext* opCtx, boost::optional<DurableTxnStateEnum> expectedState) { DBDirectClient client(opCtx); auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(), - {BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON())}); + BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON())); ASSERT_FALSE(result.isEmpty()); auto txnRecord = @@ -393,7 +393,7 @@ StatusWith<BSONObj> CollectionReader::next() { bool docExists(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { DBDirectClient client(opCtx); - auto result = client.findOne(nss.ns(), {doc}); + auto result = client.findOne(nss.ns(), doc); return !result.isEmpty(); } diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp index 08b5039f404..b7961ef871d 100644 --- a/src/mongo/db/repl/oplog_fetcher.cpp +++ b/src/mongo/db/repl/oplog_fetcher.cpp @@ -264,8 +264,12 @@ OpTime OplogFetcher::getLastOpTimeFetched_forTest() const { return _getLastOpTimeFetched(); } -BSONObj OplogFetcher::getFindQuery_forTest(long long findTimeout) const { - return _makeFindQuery(findTimeout); +BSONObj OplogFetcher::getFindQueryFilter_forTest() const { + return _makeFindQueryFilter(); +} + +Query OplogFetcher::getFindQuerySettings_forTest(long long findTimeout) const { + return _makeFindQuerySettings(findTimeout); } Milliseconds OplogFetcher::getAwaitDataTimeout_forTest() const { @@ -572,11 +576,11 @@ AggregateCommandRequest OplogFetcher::_makeAggregateCommandRequest(long long max return aggRequest; } -BSONObj OplogFetcher::_makeFindQuery(long long findTimeout) const { +BSONObj OplogFetcher::_makeFindQueryFilter() const { BSONObjBuilder queryBob; 
auto lastOpTimeFetched = _getLastOpTimeFetched(); - BSONObjBuilder filterBob(queryBob.subobjStart("query")); + BSONObjBuilder filterBob; filterBob.append("ts", BSON("$gte" << lastOpTimeFetched.getTimestamp())); // Handle caller-provided filter. if (!_config.queryFilter.isEmpty()) { @@ -584,34 +588,34 @@ BSONObj OplogFetcher::_makeFindQuery(long long findTimeout) const { "$or", BSON_ARRAY(_config.queryFilter << BSON("ts" << lastOpTimeFetched.getTimestamp()))); } - filterBob.done(); + return filterBob.obj(); +} - queryBob.append("$maxTimeMS", findTimeout); +Query OplogFetcher::_makeFindQuerySettings(long long findTimeout) const { + Query query = Query().maxTimeMS(findTimeout); if (_config.requestResumeToken) { - queryBob.append("$hint", BSON("$natural" << 1)); - queryBob.append("$_requestResumeToken", true); + query.hint(BSON("$natural" << 1)).requestResumeToken(true); } auto lastCommittedWithCurrentTerm = _dataReplicatorExternalState->getCurrentTermAndLastCommittedOpTime(); auto term = lastCommittedWithCurrentTerm.value; if (term != OpTime::kUninitializedTerm) { - queryBob.append("term", term); + query.term(term); } if (_config.queryReadConcern.isEmpty()) { // This ensures that the sync source waits for all earlier oplog writes to be visible. // Since Timestamp(0, 0) isn't allowed, Timestamp(0, 1) is the minimal we can use. - queryBob.append("readConcern", - BSON("level" - << "local" - << "afterClusterTime" << Timestamp(0, 1))); + query.readConcern(BSON("level" + << "local" + << "afterClusterTime" << Timestamp(0, 1))); } else { // Caller-provided read concern. 
- queryBob.appendElements(_config.queryReadConcern.toBSON()); + query.appendElements(_config.queryReadConcern.toBSON()); } - return queryBob.obj(); + return query; } Status OplogFetcher::_createNewCursor(bool initialFind) { @@ -642,7 +646,8 @@ Status OplogFetcher::_createNewCursor(bool initialFind) { _cursor = std::make_unique<DBClientCursor>( _conn.get(), _nss, - _makeFindQuery(maxTimeMs), + _makeFindQueryFilter(), + _makeFindQuerySettings(maxTimeMs), 0 /* nToReturn */, 0 /* nToSkip */, nullptr /* fieldsToReturn */, @@ -1039,7 +1044,7 @@ Status OplogFetcher::_checkTooStaleToSyncFromSource(const OpTime lastFetched, auto query = Query().sort(BSON("$natural" << 1)); // Since this function is called after the first batch, the exhaust stream has not been // started yet. As a result, using the same connection is safe. - remoteFirstOplogEntry = _conn->findOne(_nss.ns(), query); + remoteFirstOplogEntry = _conn->findOne(_nss.ns(), BSONObj{}, query); } catch (DBException& e) { // If an error occurs with the query, throw an error. return Status(ErrorCodes::TooStaleToSyncFromSource, e.reason()); diff --git a/src/mongo/db/repl/oplog_fetcher.h b/src/mongo/db/repl/oplog_fetcher.h index 6a67613e7e4..8c854024ceb 100644 --- a/src/mongo/db/repl/oplog_fetcher.h +++ b/src/mongo/db/repl/oplog_fetcher.h @@ -275,7 +275,8 @@ public: /** * Returns the `find` query run on the sync source's oplog. */ - BSONObj getFindQuery_forTest(long long findTimeout) const; + BSONObj getFindQueryFilter_forTest() const; + Query getFindQuerySettings_forTest(long long findTimeout) const; /** * Returns the OpTime of the last oplog entry fetched and processed. @@ -389,7 +390,8 @@ private: * whether this is the initial attempt to create the `find` query to determine what the find * timeout should be. */ - BSONObj _makeFindQuery(long long findTimeout) const; + BSONObj _makeFindQueryFilter() const; + Query _makeFindQuerySettings(long long findTimeout) const; /** * Gets the next batch from the exhaust cursor. 
diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp index ea790173e49..1e3b9c3813a 100644 --- a/src/mongo/db/repl/oplog_fetcher_test.cpp +++ b/src/mongo/db/repl/oplog_fetcher_test.cpp @@ -793,12 +793,14 @@ TEST_F(OplogFetcherTest, // Test that the correct maxTimeMS is set if this is the initial 'find' query. auto oplogFetcher = makeOplogFetcher(); auto findTimeout = durationCount<Milliseconds>(oplogFetcher->getInitialFindMaxTime_forTest()); - auto queryObj = oplogFetcher->getFindQuery_forTest(findTimeout); + + auto filter = oplogFetcher->getFindQueryFilter_forTest(); + ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter); + + auto queryObj = + (oplogFetcher->getFindQuerySettings_forTest(findTimeout)).getFullSettingsDeprecated(); ASSERT_EQUALS(60000, queryObj.getIntField("$maxTimeMS")); - ASSERT_EQUALS(mongo::BSONType::Object, queryObj["query"].type()); - ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), - queryObj["query"].Obj()); ASSERT_EQUALS(mongo::BSONType::Object, queryObj["readConcern"].type()); ASSERT_BSONOBJ_EQ(BSON("level" << "local" @@ -812,14 +814,15 @@ TEST_F(OplogFetcherTest, dataReplicatorExternalState->currentTerm = OpTime::kUninitializedTerm; auto oplogFetcher = makeOplogFetcher(); + auto filter = oplogFetcher->getFindQueryFilter_forTest(); + ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter); + // Test that the correct maxTimeMS is set if we are retrying the 'find' query. 
auto findTimeout = durationCount<Milliseconds>(oplogFetcher->getRetriedFindMaxTime_forTest()); - auto queryObj = oplogFetcher->getFindQuery_forTest(findTimeout); + auto queryObj = + (oplogFetcher->getFindQuerySettings_forTest(findTimeout)).getFullSettingsDeprecated(); ASSERT_EQUALS(2000, queryObj.getIntField("$maxTimeMS")); - ASSERT_EQUALS(mongo::BSONType::Object, queryObj["query"].type()); - ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), - queryObj["query"].Obj()); ASSERT_EQUALS(mongo::BSONType::Object, queryObj["readConcern"].type()); ASSERT_BSONOBJ_EQ(BSON("level" << "local" diff --git a/src/mongo/db/repl/oplog_interface_remote.cpp b/src/mongo/db/repl/oplog_interface_remote.cpp index 2008daae903..1a9e3e6a180 100644 --- a/src/mongo/db/repl/oplog_interface_remote.cpp +++ b/src/mongo/db/repl/oplog_interface_remote.cpp @@ -85,6 +85,7 @@ std::unique_ptr<OplogInterface::Iterator> OplogInterfaceRemote::makeIterator() c const BSONObj fields = BSON("ts" << 1 << "t" << 1LL); return std::unique_ptr<OplogInterface::Iterator>( new OplogIteratorRemote(_getConnection()->query(NamespaceString(_collectionName), + BSONObj{}, query, 0, 0, diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp index e7e3779e97f..8253317329b 100644 --- a/src/mongo/db/repl/primary_only_service.cpp +++ b/src/mongo/db/repl/primary_only_service.cpp @@ -657,7 +657,7 @@ void PrimaryOnlyService::_rebuildInstances(long long term) noexcept { Status(ErrorCodes::InternalError, "Querying state documents failed")); } - auto cursor = client.query(ns, Query()); + auto cursor = client.query(ns, BSONObj{}); while (cursor->more()) { stateDocuments.push_back(cursor->nextSafe().getOwned()); } diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp index 24d885ddaba..4ba3692bb9a 100644 --- a/src/mongo/db/repl/replication_recovery.cpp +++ b/src/mongo/db/repl/replication_recovery.cpp @@ -144,7 +144,8 @@ 
public: ? BSON("$gte" << _oplogApplicationStartPoint << "$lte" << *_oplogApplicationEndPoint) : BSON("$gte" << _oplogApplicationStartPoint); _cursor = _client->query(NamespaceString::kRsOplogNamespace, - QUERY("ts" << predicate), + BSON("ts" << predicate), + /*querySettings*/ Query(), /*limit*/ 0, /*skip*/ 0, /*projection*/ nullptr, diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp index 665955afbba..6a4b3ea84c9 100644 --- a/src/mongo/db/repl/roll_back_local_operations_test.cpp +++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp @@ -321,7 +321,8 @@ public: using DBClientConnection::query; std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp index 639a20df546..ebeda465968 100644 --- a/src/mongo/db/repl/rollback_impl.cpp +++ b/src/mongo/db/repl/rollback_impl.cpp @@ -477,10 +477,10 @@ void RollbackImpl::_restoreTxnsTableEntryFromRetryableWrites(OperationContext* o << "fromMigrate" << true); auto cursor = client->query( NamespaceString::kRsOplogNamespace, - QUERY("ts" << BSON("$gt" << stableTimestamp) << "txnNumber" << BSON("$exists" << true) - << "stmtId" << BSON("$exists" << true) << "prevOpTime.ts" - << BSON("$gte" << Timestamp(1, 0) << "$lte" << stableTimestamp) << "$or" - << BSON_ARRAY(filter << filterFromMigration))); + BSON("ts" << BSON("$gt" << stableTimestamp) << "txnNumber" << BSON("$exists" << true) + << "stmtId" << BSON("$exists" << true) << "prevOpTime.ts" + << BSON("$gte" << Timestamp(1, 0) << "$lte" << stableTimestamp) << "$or" + << BSON_ARRAY(filter << filterFromMigration))); while (cursor->more()) { auto doc = cursor->next(); auto swEntry = OplogEntry::parse(doc); diff --git a/src/mongo/db/repl/rollback_source_impl.cpp 
b/src/mongo/db/repl/rollback_source_impl.cpp index 2328705057b..c6184fb7259 100644 --- a/src/mongo/db/repl/rollback_source_impl.cpp +++ b/src/mongo/db/repl/rollback_source_impl.cpp @@ -66,9 +66,9 @@ int RollbackSourceImpl::getRollbackId() const { } BSONObj RollbackSourceImpl::getLastOperation() const { - const Query query = Query().sort(BSON("$natural" << -1)); return _getConnection()->findOne(_collectionName, - query, + BSONObj{}, + Query().sort(BSON("$natural" << -1)), nullptr, QueryOption_SecondaryOk, ReadConcernArgs::kImplicitDefault); @@ -78,6 +78,7 @@ BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& f return _getConnection() ->findOne(nss.toString(), filter, + Query(), nullptr, QueryOption_SecondaryOk, ReadConcernArgs::kImplicitDefault) diff --git a/src/mongo/db/repl/tenant_collection_cloner.cpp b/src/mongo/db/repl/tenant_collection_cloner.cpp index 2409c1e81da..75827e5f6b3 100644 --- a/src/mongo/db/repl/tenant_collection_cloner.cpp +++ b/src/mongo/db/repl/tenant_collection_cloner.cpp @@ -212,7 +212,8 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::checkIfDonorCollectionIsE auto fieldsToReturn = BSON("_id" << 1); auto cursor = getClient()->query(_sourceDbAndUuid, - {} /* Query */, + BSONObj{} /* filter */, + Query() /* querySettings */, 1 /* limit */, 0 /* skip */, &fieldsToReturn, @@ -348,8 +349,8 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::createCollectionStage() { ON_BLOCK_EXIT([&opCtx] { tenantMigrationRecipientInfo(opCtx.get()) = boost::none; }); auto fieldsToReturn = BSON("_id" << 1); - _lastDocId = - client.findOne(_existingNss->ns(), Query().sort(BSON("_id" << -1)), &fieldsToReturn); + _lastDocId = client.findOne( + _existingNss->ns(), BSONObj{}, Query().sort(BSON("_id" << -1)), &fieldsToReturn); if (!_lastDocId.isEmpty()) { // The collection is not empty. Skip creating indexes and resume cloning from the last // document. 
@@ -462,21 +463,21 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::queryStage() { } void TenantCollectionCloner::runQuery() { - auto query = _lastDocId.isEmpty() - ? QUERY("query" << BSONObj()) - // Use $expr and the aggregation version of $gt to avoid type bracketing. - : QUERY("$expr" << BSON("$gt" << BSON_ARRAY("$_id" << _lastDocId["_id"]))); - if (_collectionOptions.clusteredIndex) { + const BSONObj& filter = _lastDocId.isEmpty() + ? BSONObj{} // Use $expr and the aggregation version of $gt to avoid type bracketing. + : BSON("$expr" << BSON("$gt" << BSON_ARRAY("$_id" << _lastDocId["_id"]))); + + auto query = _collectionOptions.clusteredIndex // RecordIds are _id values and has no separate _id index - query.hint(BSON("$natural" << 1)); - } else { - query.hint(BSON("_id" << 1)); - } + ? Query().hint(BSON("$natural" << 1)) + : Query().hint(BSON("_id" << 1)); + // Any errors that are thrown here (including NamespaceNotFound) will be handled on the stage // level. getClient()->query([this](DBClientCursorBatchIterator& iter) { handleNextBatch(iter); }, _sourceDbAndUuid, + filter, query, nullptr /* fieldsToReturn */, QueryOption_NoCursorTimeout | QueryOption_SecondaryOk | diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp index e3e2d429f29..fa3c2053719 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp @@ -573,6 +573,7 @@ OpTime TenantMigrationRecipientService::Instance::_getDonorMajorityOpTime( BSON(OplogEntry::kTimestampFieldName << 1 << OplogEntry::kTermFieldName << 1); auto majorityOpTimeBson = client->findOne(NamespaceString::kRsOplogNamespace.ns(), + BSONObj{}, Query().sort("$natural", -1), &oplogOpTimeFields, QueryOption_SecondaryOk, @@ -867,8 +868,8 @@ void TenantMigrationRecipientService::Instance::_getStartOpTimesFromDonor(WithLo auto transactionTableOpTimeFields = 
BSON(SessionTxnRecord::kStartOpTimeFieldName << 1); auto earliestOpenTransactionBson = _client->findOne( NamespaceString::kSessionTransactionsTableNamespace.ns(), - QUERY("state" << BSON("$in" << BSON_ARRAY(preparedState << inProgressState))) - .sort(SessionTxnRecord::kStartOpTimeFieldName.toString(), 1), + BSON("state" << BSON("$in" << BSON_ARRAY(preparedState << inProgressState))), + Query().sort(SessionTxnRecord::kStartOpTimeFieldName.toString(), 1), &transactionTableOpTimeFields, QueryOption_SecondaryOk, ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner()); @@ -1875,6 +1876,7 @@ void TenantMigrationRecipientService::Instance::_fetchAndStoreDonorClusterTimeKe std::vector<ExternalKeysCollectionDocument> keyDocs; auto cursor = _client->query(NamespaceString::kKeysCollectionNamespace, + BSONObj{}, Query().readPref(_readPreference.pref, _readPreference.tags.getTagBSON())); while (cursor->more()) { const auto doc = cursor->nextSafe().getOwned(); @@ -1892,7 +1894,8 @@ void TenantMigrationRecipientService::Instance::_compareRecipientAndDonorFCV() c auto donorFCVbson = _client->findOne(NamespaceString::kServerConfigurationNamespace.ns(), - QUERY("_id" << FeatureCompatibilityVersionParser::kParameterName), + BSON("_id" << FeatureCompatibilityVersionParser::kParameterName), + Query(), nullptr, QueryOption_SecondaryOk, ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner()); diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp index bb85e38ae82..0b44a1155f3 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp @@ -352,7 +352,7 @@ protected: const std::string& nss, const std::vector<HostAndPort>& hosts) { for (const auto& host : hosts) { - replSet->getNode(host.toString())->remove(nss, Query()); + replSet->getNode(host.toString())->remove(nss, BSONObj{} /*filter*/); } } 
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp index 6ff76d34087..103a5c4e149 100644 --- a/src/mongo/db/repl/transaction_oplog_application.cpp +++ b/src/mongo/db/repl/transaction_oplog_application.cpp @@ -556,8 +556,8 @@ void reconstructPreparedTransactions(OperationContext* opCtx, repl::OplogApplica DBDirectClient client(opCtx); const auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("state" - << "prepared")}); + BSON("state" + << "prepared")); // Iterate over each entry in the transactions table that has a prepared transaction. while (cursor->more()) { diff --git a/src/mongo/db/rs_local_client.cpp b/src/mongo/db/rs_local_client.cpp index f4a668bed7c..9c6a455e70d 100644 --- a/src/mongo/db/rs_local_client.cpp +++ b/src/mongo/db/rs_local_client.cpp @@ -124,18 +124,18 @@ StatusWith<Shard::QueryResponse> RSLocalClient::queryOnce( } DBDirectClient client(opCtx); - Query fullQuery(query); + Query querySettings; if (!sort.isEmpty()) { - fullQuery.sort(sort); + querySettings.sort(sort); } if (hint) { - fullQuery.hint(*hint); + querySettings.hint(*hint); } - fullQuery.readPref(readPref.pref, BSONArray()); + querySettings.readPref(readPref.pref, BSONArray()); try { std::unique_ptr<DBClientCursor> cursor = - client.query(nss, fullQuery, limit.get_value_or(0)); + client.query(nss, query, querySettings, limit.get_value_or(0)); if (!cursor) { return {ErrorCodes::OperationFailed, diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp index ee5fc3b8a16..01b84977aa3 100644 --- a/src/mongo/db/s/chunk_splitter.cpp +++ b/src/mongo/db/s/chunk_splitter.cpp @@ -172,6 +172,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx, // upper bound. Chunk range upper bounds are exclusive so skip a document to // make the lower half of the split end up with a single document. 
std::unique_ptr<DBClientCursor> cursor = client.query(nss, + BSONObj{}, q, 1, /* limit */ 1 /* nToSkip */); @@ -185,7 +186,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx, end = cursor->next().getOwned(); } } else { - end = client.findOne(nss.ns(), q); + end = client.findOne(nss.ns(), BSONObj{}, q); } if (end.isEmpty()) { diff --git a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp index 69fc86df0e6..8279c18a332 100644 --- a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp +++ b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp @@ -65,7 +65,7 @@ void assertExistsReshardingDocument(OperationContext* opCtx, UUID reshardingUUID boost::optional<ReshardingCoordinatorDocument> docOptional; store.forEach(opCtx, - QUERY(ReshardingCoordinatorDocument::kReshardingUUIDFieldName << reshardingUUID), + BSON(ReshardingCoordinatorDocument::kReshardingUUIDFieldName << reshardingUUID), [&](const ReshardingCoordinatorDocument& doc) { docOptional.emplace(doc); return false; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp index 8434c02eeaa..d1857fe231a 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp @@ -163,7 +163,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx, (std::string) "^" + pcrecpp::RE::QuoteMeta(dbName.toString()) + "$", "i"); - auto dbDoc = client.findOne(DatabaseType::ConfigNS.ns(), {queryBuilder.obj()}); + auto dbDoc = client.findOne(DatabaseType::ConfigNS.ns(), queryBuilder.obj()); auto const [primaryShardPtr, database] = [&] { if (!dbDoc.isEmpty()) { auto actualDb = uassertStatusOK(DatabaseType::fromBSON(dbDoc)); diff --git a/src/mongo/db/s/create_collection_coordinator.cpp 
b/src/mongo/db/s/create_collection_coordinator.cpp index f514802a291..4ece89a7c0e 100644 --- a/src/mongo/db/s/create_collection_coordinator.cpp +++ b/src/mongo/db/s/create_collection_coordinator.cpp @@ -156,7 +156,7 @@ bool checkIfCollectionIsEmpty(OperationContext* opCtx, const NamespaceString& ns // command doesn't just consult the cached metadata, which may not always be // correct DBDirectClient localClient(opCtx); - return localClient.findOne(nss.ns(), Query()).isEmpty(); + return localClient.findOne(nss.ns(), BSONObj{}).isEmpty(); } int getNumShards(OperationContext* opCtx) { diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp index ffdf12f08c8..72c4000e4eb 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp @@ -137,8 +137,8 @@ protected: ASSERT_GT(response["n"].Int(), 0); } - void updateDocsInShardedCollection(BSONObj query, BSONObj updated) { - auto response = client()->updateAcknowledged(kNss.ns(), query, updated); + void updateDocsInShardedCollection(BSONObj filter, BSONObj updated) { + auto response = client()->updateAcknowledged(kNss.ns(), filter, updated); ASSERT_OK(getStatusFromWriteCommandReply(response)); ASSERT_GT(response["n"].Int(), 0); } diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp index 99a11b8a251..bcb36814568 100644 --- a/src/mongo/db/s/migration_util.cpp +++ b/src/mongo/db/s/migration_util.cpp @@ -321,11 +321,11 @@ ChunkRange extendOrTruncateBoundsForMetadata(const CollectionMetadata& metadata, } } -Query overlappingRangeQuery(const ChunkRange& range, const UUID& uuid) { - return QUERY(RangeDeletionTask::kCollectionUuidFieldName - << uuid << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey << LT - << range.getMax() << RangeDeletionTask::kRangeFieldName + "." 
+ ChunkRange::kMaxKey - << GT << range.getMin()); +BSONObj overlappingRangeQuery(const ChunkRange& range, const UUID& uuid) { + return BSON(RangeDeletionTask::kCollectionUuidFieldName + << uuid << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey << LT + << range.getMax() << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMaxKey + << GT << range.getMin()); } size_t checkForConflictingDeletions(OperationContext* opCtx, @@ -501,7 +501,7 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx, void submitPendingDeletions(OperationContext* opCtx) { PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace); - auto query = QUERY("pending" << BSON("$exists" << false)); + auto query = BSON("pending" << BSON("$exists" << false)); store.forEach(opCtx, query, [&opCtx](const RangeDeletionTask& deletionTask) { migrationutil::submitRangeDeletionTask(opCtx, deletionTask).getAsync([](auto) {}); @@ -692,7 +692,7 @@ void persistCommitDecision(OperationContext* opCtx, PersistentTaskStore<MigrationCoordinatorDocument> store( NamespaceString::kMigrationCoordinatorsNamespace); store.upsert(opCtx, - QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()), + BSON(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()), migrationDoc.toBSON()); if (hangInPersistMigrateCommitDecisionThenSimulateErrorUninterruptible.shouldFail()) { @@ -711,7 +711,7 @@ void persistAbortDecision(OperationContext* opCtx, PersistentTaskStore<MigrationCoordinatorDocument> store( NamespaceString::kMigrationCoordinatorsNamespace); store.upsert(opCtx, - QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()), + BSON(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()), migrationDoc.toBSON()); if (hangInPersistMigrateAbortDecisionThenSimulateErrorUninterruptible.shouldFail()) { @@ -748,7 +748,7 @@ void deleteRangeDeletionTaskLocally(OperationContext* opCtx, const WriteConcernOptions& 
writeConcern) { hangInDeleteRangeDeletionLocallyInterruptible.pauseWhileSet(opCtx); PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace); - store.remove(opCtx, QUERY(RangeDeletionTask::kIdFieldName << deletionTaskId), writeConcern); + store.remove(opCtx, BSON(RangeDeletionTask::kIdFieldName << deletionTaskId), writeConcern); if (hangInDeleteRangeDeletionLocallyThenSimulateErrorUninterruptible.shouldFail()) { hangInDeleteRangeDeletionLocallyThenSimulateErrorUninterruptible.pauseWhileSet(opCtx); @@ -829,7 +829,7 @@ void advanceTransactionOnRecipient(OperationContext* opCtx, void markAsReadyRangeDeletionTaskLocally(OperationContext* opCtx, const UUID& migrationId) { PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace); - auto query = QUERY(RangeDeletionTask::kIdFieldName << migrationId); + auto query = BSON(RangeDeletionTask::kIdFieldName << migrationId); auto update = BSON("$unset" << BSON(RangeDeletionTask::kPendingFieldName << "")); hangInReadyRangeDeletionLocallyInterruptible.pauseWhileSet(opCtx); @@ -851,7 +851,7 @@ void deleteMigrationCoordinatorDocumentLocally(OperationContext* opCtx, const UU PersistentTaskStore<MigrationCoordinatorDocument> store( NamespaceString::kMigrationCoordinatorsNamespace); store.remove(opCtx, - QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationId), + BSON(MigrationCoordinatorDocument::kIdFieldName << migrationId), {1, WriteConcernOptions::SyncMode::UNSET, Seconds(0)}); } @@ -899,7 +899,7 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) { PersistentTaskStore<MigrationCoordinatorDocument> store( NamespaceString::kMigrationCoordinatorsNamespace); store.forEach(opCtx, - Query{}, + BSONObj{}, [&opCtx, &unfinishedMigrationsCount](const MigrationCoordinatorDocument& doc) { // MigrationCoordinators are only created under the MigrationBlockingGuard, // which means that only one can possibly exist on an instance at a time. 
@@ -973,7 +973,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss) NamespaceString::kMigrationCoordinatorsNamespace); store.forEach( opCtx, - QUERY(MigrationCoordinatorDocument::kNssFieldName << nss.toString()), + BSON(MigrationCoordinatorDocument::kNssFieldName << nss.toString()), [&opCtx, &migrationRecoveryCount](const MigrationCoordinatorDocument& doc) { LOGV2_DEBUG(4798502, 2, diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h index 865056b36a3..780357dbc6d 100644 --- a/src/mongo/db/s/migration_util.h +++ b/src/mongo/db/s/migration_util.h @@ -85,7 +85,7 @@ std::shared_ptr<executor::ThreadPoolTaskExecutor> getMigrationUtilExecutor( * Creates a query object that can used to find overlapping ranges in the pending range deletions * collection. */ -Query overlappingRangeQuery(const ChunkRange& range, const UUID& uuid); +BSONObj overlappingRangeQuery(const ChunkRange& range, const UUID& uuid); /** * Checks the pending range deletions collection to see if there are any pending ranges that diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h index 05db3cb68cb..095dd3c9d55 100644 --- a/src/mongo/db/s/persistent_task_queue.h +++ b/src/mongo/db/s/persistent_task_queue.h @@ -110,7 +110,7 @@ PersistentTaskQueue<T>::PersistentTaskQueue(OperationContext* opCtx, NamespaceSt DBDirectClient client(opCtx); auto projection = BSON("_id" << 1); - auto cursor = client.query(_storageNss, Query(), 0, 0, &projection); + auto cursor = client.query(_storageNss, BSONObj{}, Query(), 0, 0, &projection); _count = cursor->itcount(); if (_count > 0) @@ -203,15 +203,15 @@ bool PersistentTaskQueue<T>::empty(OperationContext* opCtx) const { template <typename T> TaskId PersistentTaskQueue<T>::_loadLastId(DBDirectClient& client) { auto fieldsToReturn = BSON("_id" << 1); - auto maxId = - client.findOne(_storageNss.toString(), Query().sort(BSON("_id" << -1)), &fieldsToReturn); + auto maxId = 
client.findOne( + _storageNss.toString(), BSONObj{}, Query().sort(BSON("_id" << -1)), &fieldsToReturn); return maxId.getField("_id").Long(); } template <typename T> typename boost::optional<typename BlockingTaskQueue<T>::Record> PersistentTaskQueue<T>::_loadNextRecord(DBDirectClient& client) { - auto bson = client.findOne(_storageNss.toString(), Query().sort("_id")); + auto bson = client.findOne(_storageNss.toString(), BSONObj{}, Query().sort("_id")); boost::optional<typename PersistentTaskQueue<T>::Record> result; diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp index 4cc40fb0f3e..5ad24b06cef 100644 --- a/src/mongo/db/s/range_deletion_util.cpp +++ b/src/mongo/db/s/range_deletion_util.cpp @@ -270,9 +270,9 @@ void ensureRangeDeletionTaskStillExists(OperationContext* opCtx, const UUID& mig // for deleting the range deletion task document. PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace); auto count = store.count(opCtx, - QUERY(RangeDeletionTask::kIdFieldName - << migrationId << RangeDeletionTask::kPendingFieldName - << BSON("$exists" << false))); + BSON(RangeDeletionTask::kIdFieldName + << migrationId << RangeDeletionTask::kPendingFieldName + << BSON("$exists" << false))); invariant(count == 0 || count == 1, "found duplicate range deletion tasks"); uassert(ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist, "Range deletion task no longer exists", @@ -380,7 +380,7 @@ void removePersistentRangeDeletionTask(const NamespaceString& nss, UUID migratio withTemporaryOperationContext([&](OperationContext* opCtx) { PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace); - store.remove(opCtx, QUERY(RangeDeletionTask::kIdFieldName << migrationId)); + store.remove(opCtx, BSON(RangeDeletionTask::kIdFieldName << migrationId)); }); } @@ -413,7 +413,7 @@ std::vector<RangeDeletionTask> getPersistentRangeDeletionTasks(OperationContext* 
std::vector<RangeDeletionTask> tasks; PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace); - auto query = QUERY(RangeDeletionTask::kNssFieldName << nss.ns()); + auto query = BSON(RangeDeletionTask::kNssFieldName << nss.ns()); store.forEach(opCtx, query, [&](const RangeDeletionTask& deletionTask) { tasks.push_back(std::move(deletionTask)); @@ -431,7 +431,7 @@ void snapshotRangeDeletionsForRename(OperationContext* opCtx, // Clear out eventual snapshots associated with the target collection: always restart from a // clean state in case of stepdown or primary killed. PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionForRenameNamespace); - store.remove(opCtx, QUERY(RangeDeletionTask::kNssFieldName << toNss.ns())); + store.remove(opCtx, BSON(RangeDeletionTask::kNssFieldName << toNss.ns())); auto rangeDeletionTasks = getPersistentRangeDeletionTasks(opCtx, fromNss); for (auto& task : rangeDeletionTasks) { @@ -449,7 +449,7 @@ void restoreRangeDeletionTasksForRename(OperationContext* opCtx, const Namespace PersistentTaskStore<RangeDeletionTask> rangeDeletionsStore( NamespaceString::kRangeDeletionNamespace); - const auto query = QUERY(RangeDeletionTask::kNssFieldName << nss.ns()); + const auto query = BSON(RangeDeletionTask::kNssFieldName << nss.ns()); rangeDeletionsForRenameStore.forEach(opCtx, query, [&](const RangeDeletionTask& deletionTask) { try { @@ -467,13 +467,13 @@ void deleteRangeDeletionTasksForRename(OperationContext* opCtx, // Delete range deletion tasks associated to the source collection PersistentTaskStore<RangeDeletionTask> rangeDeletionsStore( NamespaceString::kRangeDeletionNamespace); - rangeDeletionsStore.remove(opCtx, QUERY(RangeDeletionTask::kNssFieldName << fromNss.ns())); + rangeDeletionsStore.remove(opCtx, BSON(RangeDeletionTask::kNssFieldName << fromNss.ns())); // Delete already restored snapshots associated to the target collection PersistentTaskStore<RangeDeletionTask> 
rangeDeletionsForRenameStore( NamespaceString::kRangeDeletionForRenameNamespace); rangeDeletionsForRenameStore.remove(opCtx, - QUERY(RangeDeletionTask::kNssFieldName << toNss.ns())); + BSON(RangeDeletionTask::kNssFieldName << toNss.ns())); } diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp index 1b0e75d6327..8a42ae17b8c 100644 --- a/src/mongo/db/s/range_deletion_util_test.cpp +++ b/src/mongo/db/s/range_deletion_util_test.cpp @@ -189,7 +189,7 @@ RangeDeletionTask insertRangeDeletionTask(OperationContext* opCtx, UUID uuid, Ch // Document should be in the store. ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, opCtx), 1); - auto query = QUERY(RangeDeletionTask::kIdFieldName << migrationId); + auto query = BSON(RangeDeletionTask::kIdFieldName << migrationId); t.setPending(boost::none); auto update = t.toBSON(); store.update(opCtx, query, update); diff --git a/src/mongo/db/s/recoverable_critical_section_service.cpp b/src/mongo/db/s/recoverable_critical_section_service.cpp index 371de82a6e7..28393506183 100644 --- a/src/mongo/db/s/recoverable_critical_section_service.cpp +++ b/src/mongo/db/s/recoverable_critical_section_service.cpp @@ -359,7 +359,7 @@ void RecoverableCriticalSectionService::recoverRecoverableCriticalSections( // Map the critical sections that are on disk to memory PersistentTaskStore<CollectionCriticalSectionDocument> store( NamespaceString::kCollectionCriticalSectionsNamespace); - store.forEach(opCtx, Query{}, [&opCtx](const CollectionCriticalSectionDocument& doc) { + store.forEach(opCtx, BSONObj{}, [&opCtx](const CollectionCriticalSectionDocument& doc) { const auto& nss = doc.getNss(); { AutoGetCollection collLock(opCtx, nss, MODE_X); diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp index 61ddc7e13cd..46f815acd76 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp +++ 
b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp @@ -228,7 +228,8 @@ public: ReshardingCoordinatorDocument getCoordinatorDoc(OperationContext* opCtx) { DBDirectClient client(opCtx); - auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), {}); + auto doc = + client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), BSONObj{}); IDLParserErrorContext errCtx("reshardingCoordFromTest"); return ReshardingCoordinatorDocument::parse(errCtx, doc); } diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp index 0b2d6b162ba..d970eff8635 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp @@ -237,7 +237,7 @@ protected: OperationContext* opCtx, ReshardingCoordinatorDocument expectedCoordinatorDoc) { DBDirectClient client(opCtx); auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), - Query(BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()))); + BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns())); auto coordinatorDoc = ReshardingCoordinatorDocument::parse( IDLParserErrorContext("ReshardingCoordinatorTest"), doc); @@ -319,7 +319,7 @@ protected: const ReshardingCoordinatorDocument& expectedCoordinatorDoc) { DBDirectClient client(opCtx); CollectionType onDiskEntry( - client.findOne(CollectionType::ConfigNS.ns(), Query(BSON("_id" << _originalNss.ns())))); + client.findOne(CollectionType::ConfigNS.ns(), BSON("_id" << _originalNss.ns()))); ASSERT_EQUALS(onDiskEntry.getAllowMigrations(), expectedCollType.getAllowMigrations()); @@ -378,8 +378,7 @@ protected: void assertTemporaryCollectionCatalogEntryMatchesExpected( OperationContext* opCtx, boost::optional<CollectionType> expectedCollType) { DBDirectClient client(opCtx); - auto doc = - client.findOne(CollectionType::ConfigNS.ns(), Query(BSON("_id" << _tempNss.ns()))); + 
auto doc = client.findOne(CollectionType::ConfigNS.ns(), BSON("_id" << _tempNss.ns())); if (!expectedCollType) { ASSERT(doc.isEmpty()); return; @@ -423,7 +422,7 @@ protected: const Timestamp& collTimestamp) { DBDirectClient client(opCtx); std::vector<ChunkType> foundChunks; - auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("uuid" << uuid))); + auto cursor = client.query(ChunkType::ConfigNS, BSON("uuid" << uuid)); while (cursor->more()) { auto d = uassertStatusOK( ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp)); @@ -449,7 +448,7 @@ protected: DBDirectClient client(opCtx); std::vector<TagsType> foundZones; - auto cursor = client.query(TagsType::ConfigNS, Query(BSON("ns" << nss.ns()))); + auto cursor = client.query(TagsType::ConfigNS, BSON("ns" << nss.ns())); while (cursor->more()) { foundZones.push_back( uassertStatusOK(TagsType::fromBSON(cursor->nextSafe().getOwned()))); @@ -618,11 +617,10 @@ protected: // Check that chunks and tags under the temp namespace have been removed DBDirectClient client(opCtx); - auto chunkDoc = - client.findOne(ChunkType::ConfigNS.ns(), Query(BSON("ns" << _tempNss.ns()))); + auto chunkDoc = client.findOne(ChunkType::ConfigNS.ns(), BSON("ns" << _tempNss.ns())); ASSERT(chunkDoc.isEmpty()); - auto tagDoc = client.findOne(TagsType::ConfigNS.ns(), Query(BSON("ns" << _tempNss.ns()))); + auto tagDoc = client.findOne(TagsType::ConfigNS.ns(), BSON("ns" << _tempNss.ns())); ASSERT(tagDoc.isEmpty()); // Check that chunks and tags entries previously under the temporary namespace have been @@ -642,7 +640,7 @@ protected: // Check that the entry is removed from config.reshardingOperations DBDirectClient client(opCtx); auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), - Query(BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()))); + BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns())); ASSERT(doc.isEmpty()); // Check that the resharding fields are removed from the 
config.collections entry and diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp index 2fed3542cd0..3461c15b815 100644 --- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp +++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp @@ -105,8 +105,8 @@ void ensureOplogCollectionsDropped(OperationContext* opCtx, NamespaceString::kReshardingApplierProgressNamespace); oplogApplierProgressStore.remove( opCtx, - QUERY(ReshardingOplogApplierProgress::kOplogSourceIdFieldName - << reshardingSourceId.toBSON()), + BSON(ReshardingOplogApplierProgress::kOplogSourceIdFieldName + << reshardingSourceId.toBSON()), WriteConcernOptions()); // Remove the txn cloner progress doc for this donor. @@ -114,7 +114,7 @@ void ensureOplogCollectionsDropped(OperationContext* opCtx, NamespaceString::kReshardingTxnClonerProgressNamespace); txnClonerProgressStore.remove( opCtx, - QUERY(ReshardingTxnClonerProgress::kSourceIdFieldName << reshardingSourceId.toBSON()), + BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << reshardingSourceId.toBSON()), WriteConcernOptions()); // Drop the conflict stash collection for this donor. 
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp index 2e9358925a7..853f71024b6 100644 --- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp @@ -312,7 +312,7 @@ void clearFilteringMetadata(OperationContext* opCtx, bool scheduleAsyncRefresh) NamespaceString::kRecipientReshardingOperationsNamespace}) { PersistentTaskStore<CommonReshardingMetadata> store(homeToReshardingDocs); - store.forEach(opCtx, Query(), [&](CommonReshardingMetadata reshardingDoc) -> bool { + store.forEach(opCtx, BSONObj{}, [&](CommonReshardingMetadata reshardingDoc) -> bool { namespacesToRefresh.insert(reshardingDoc.getSourceNss()); namespacesToRefresh.insert(reshardingDoc.getTempReshardingNss()); diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp index 202e5898b79..47e35c202e7 100644 --- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp @@ -693,7 +693,7 @@ TEST_F(ReshardingDonorServiceTest, TruncatesXLErrorOnDonorDocument) { NamespaceString::kDonorReshardingOperationsNamespace); store.forEach( opCtx.get(), - QUERY(ReshardingDonorDocument::kReshardingUUIDFieldName << doc.getReshardingUUID()), + BSON(ReshardingDonorDocument::kReshardingUUIDFieldName << doc.getReshardingUUID()), [&](const auto& donorDocument) { persistedDonorDocument.emplace(donorDocument); return false; diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp index cb6efb31d30..7cd00b1f637 100644 --- a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp +++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp @@ -132,7 +132,7 @@ ReshardingCleaner<Service, StateMachine, ReshardingDocument>::_fetchReshardingDo 
OperationContext* opCtx) { boost::optional<ReshardingDocument> docOptional; _store.forEach(opCtx, - QUERY(ReshardingDocument::kReshardingUUIDFieldName << _reshardingUUID), + BSON(ReshardingDocument::kReshardingUUIDFieldName << _reshardingUUID), [&](const ReshardingDocument& doc) { docOptional.emplace(doc); return false; diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp index ad2f2e399ad..1e47f26b6c0 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp @@ -230,7 +230,7 @@ void ReshardingOplogApplier::_clearAppliedOpsAndStoreProgress(OperationContext* store.upsert( opCtx, - QUERY(ReshardingOplogApplierProgress::kOplogSourceIdFieldName << _sourceId.toBSON()), + BSON(ReshardingOplogApplierProgress::kOplogSourceIdFieldName << _sourceId.toBSON()), builder.obj()); _env->metrics()->onOplogEntriesApplied(_currentBatchToApply.size()); diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp index c97b47ffce8..c21596f18e5 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp @@ -229,7 +229,7 @@ public: std::vector<repl::DurableOplogEntry> result; PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace); - store.forEach(opCtx, QUERY("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) { + store.forEach(opCtx, BSON("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) { result.emplace_back( unittest::assertGet(repl::DurableOplogEntry::parse(oplogEntry.toBSON()))); return true; @@ -245,7 +245,7 @@ public: PersistentTaskStore<SessionTxnRecord> store( NamespaceString::kSessionTransactionsTableNamespace); store.forEach(opCtx, - QUERY(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()), + 
BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()), [&](const auto& sessionTxnRecord) { result.emplace(sessionTxnRecord); return false; diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp index b933e609fc1..a88783eda55 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp @@ -206,27 +206,26 @@ public: std::vector<SlimApplyOpsInfo> result; PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace); - store.forEach( - opCtx, - QUERY("op" - << "c" - << "o.applyOps" << BSON("$exists" << true) << "ts" << BSON("$gt" << ts)), - [&](const auto& oplogEntry) { - auto applyOpsCmd = oplogEntry.getObject().getOwned(); - auto applyOpsInfo = repl::ApplyOpsCommandInfo::parse(applyOpsCmd); - - std::vector<repl::DurableReplOperation> operations; - operations.reserve(applyOpsInfo.getOperations().size()); - - for (const auto& innerOp : applyOpsInfo.getOperations()) { - operations.emplace_back( - repl::DurableReplOperation::parse({"findApplyOpsNewerThan"}, innerOp)); - } - - result.emplace_back( - SlimApplyOpsInfo{std::move(applyOpsCmd), std::move(operations)}); - return true; - }); + store.forEach(opCtx, + BSON("op" + << "c" + << "o.applyOps" << BSON("$exists" << true) << "ts" << BSON("$gt" << ts)), + [&](const auto& oplogEntry) { + auto applyOpsCmd = oplogEntry.getObject().getOwned(); + auto applyOpsInfo = repl::ApplyOpsCommandInfo::parse(applyOpsCmd); + + std::vector<repl::DurableReplOperation> operations; + operations.reserve(applyOpsInfo.getOperations().size()); + + for (const auto& innerOp : applyOpsInfo.getOperations()) { + operations.emplace_back(repl::DurableReplOperation::parse( + {"findApplyOpsNewerThan"}, innerOp)); + } + + result.emplace_back( + SlimApplyOpsInfo{std::move(applyOpsCmd), std::move(operations)}); + return true; + }); return 
result; } diff --git a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp index 14aed43039a..61a519c7493 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp @@ -174,7 +174,7 @@ public: std::vector<repl::DurableOplogEntry> result; PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace); - store.forEach(opCtx, QUERY("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) { + store.forEach(opCtx, BSON("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) { result.emplace_back( unittest::assertGet(repl::DurableOplogEntry::parse(oplogEntry.toBSON()))); return true; @@ -190,7 +190,7 @@ public: PersistentTaskStore<SessionTxnRecord> store( NamespaceString::kSessionTransactionsTableNamespace); store.forEach(opCtx, - QUERY(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()), + BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()), [&](const auto& sessionTxnRecord) { result.emplace(sessionTxnRecord); return false; diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp index 481d04a3556..5640ea3bac9 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp +++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp @@ -627,8 +627,8 @@ TEST_F(ReshardingRecipientServiceTest, TruncatesXLErrorOnRecipientDocument) { PersistentTaskStore<ReshardingRecipientDocument> store( NamespaceString::kRecipientReshardingOperationsNamespace); store.forEach(opCtx.get(), - QUERY(ReshardingRecipientDocument::kReshardingUUIDFieldName - << doc.getReshardingUUID()), + BSON(ReshardingRecipientDocument::kReshardingUUIDFieldName + << doc.getReshardingUUID()), [&](const auto& recipientDocument) { 
persistedRecipientDocument.emplace(recipientDocument); return false; diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp index c0600fbbc5d..2ffa4e40ab0 100644 --- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp +++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp @@ -111,7 +111,7 @@ boost::optional<LogicalSessionId> ReshardingTxnCloner::_fetchProgressLsid(Operat boost::optional<LogicalSessionId> progressLsid; store.forEach(opCtx, - QUERY(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()), + BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()), [&](const auto& doc) { progressLsid = doc.getProgress(); return false; @@ -184,7 +184,7 @@ void ReshardingTxnCloner::_updateProgressDocument(OperationContext* opCtx, store.upsert( opCtx, - QUERY(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()), + BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()), BSON("$set" << BSON(ReshardingTxnClonerProgress::kProgressFieldName << progress.toBSON())), {1, WriteConcernOptions::SyncMode::UNSET, Seconds(0)}); } diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp index 1b22d39f53c..e302f943693 100644 --- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp +++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp @@ -253,9 +253,10 @@ protected: DBDirectClient client(operationContext()); // The same logical session entry may be inserted more than once by a test case, so use a // $natural sort to find the most recently inserted entry. 
- Query oplogQuery(BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON())); - auto bsonOplog = client.findOne(NamespaceString::kRsOplogNamespace.ns(), - oplogQuery.sort(BSON("$natural" << -1))); + auto bsonOplog = + client.findOne(NamespaceString::kRsOplogNamespace.ns(), + BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON()), + Query().sort(BSON("$natural" << -1))); ASSERT(!bsonOplog.isEmpty()); auto oplogEntry = repl::MutableOplogEntry::parse(bsonOplog).getValue(); ASSERT_EQ(oplogEntry.getTxnNumber().get(), txnNum); @@ -265,7 +266,7 @@ protected: auto bsonTxn = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(), - {BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON())}); + BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON())); ASSERT(!bsonTxn.isEmpty()); auto txn = SessionTxnRecord::parse( IDLParserErrorContext("resharding config transactions cloning test"), bsonTxn); @@ -422,7 +423,7 @@ protected: std::vector<repl::DurableOplogEntry> result; PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace); - store.forEach(opCtx, QUERY("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) { + store.forEach(opCtx, BSON("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) { result.emplace_back( unittest::assertGet(repl::DurableOplogEntry::parse(oplogEntry.toBSON()))); return true; @@ -438,7 +439,7 @@ protected: PersistentTaskStore<SessionTxnRecord> store( NamespaceString::kSessionTransactionsTableNamespace); store.forEach(opCtx, - QUERY(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()), + BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()), [&](const auto& sessionTxnRecord) { result.emplace(sessionTxnRecord); return false; diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp index 362a88e9c30..8943f73ffb9 100644 --- a/src/mongo/db/s/resharding_destined_recipient_test.cpp +++ 
b/src/mongo/db/s/resharding_destined_recipient_test.cpp @@ -391,7 +391,7 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnMultiUpdates) OperationShardingState::get(opCtx).initializeClientRoutingVersions( kNss, ChunkVersion::IGNORED(), env.dbVersion); client.update(kNss.ns(), - Query{BSON("x" << 0)}, + BSON("x" << 0), BSON("$set" << BSON("z" << 5)), false /*upsert*/, true /*multi*/); diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp index 1768d029e9e..7d770e47cff 100644 --- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp +++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp @@ -1994,7 +1994,7 @@ TEST_F(SessionCatalogMigrationDestinationTest, MigratingKnownStmtWhileOplogTrunc { // Confirm that oplog is indeed empty. DBDirectClient client(opCtx); - auto result = client.findOne(NamespaceString::kRsOplogNamespace.ns(), {}); + auto result = client.findOne(NamespaceString::kRsOplogNamespace.ns(), BSONObj{}); ASSERT_TRUE(result.isEmpty()); } diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp index 38c1723adab..c257fc08404 100644 --- a/src/mongo/db/s/session_catalog_migration_source.cpp +++ b/src/mongo/db/s/session_catalog_migration_source.cpp @@ -63,6 +63,7 @@ boost::optional<repl::OplogEntry> forgeNoopEntryFromImageCollection( BSONObj imageObj = client.findOne(NamespaceString::kConfigImagesNamespace.ns(), BSON("_id" << retryableFindAndModifyOplogEntry.getSessionId()->toBSON()), + Query(), nullptr); if (imageObj.isEmpty()) { return boost::none; @@ -124,7 +125,7 @@ boost::optional<repl::OplogEntry> fetchPrePostImageOplog(OperationContext* opCtx auto opTime = opTimeToFetch.value(); DBDirectClient client(opCtx); auto oplogBSON = - client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), nullptr); + client.findOne(NamespaceString::kRsOplogNamespace.ns(), 
opTime.asQuery(), Query(), nullptr); return uassertStatusOK(repl::OplogEntry::parse(oplogBSON)); } @@ -192,13 +193,12 @@ SessionCatalogMigrationSource::SessionCatalogMigrationSource(OperationContext* o _rollbackIdAtInit(repl::ReplicationProcess::get(opCtx)->getRollbackID()), _chunkRange(std::move(chunk)), _keyPattern(shardKey) { - Query query; // Sort is not needed for correctness. This is just for making it easier to write deterministic // tests. - query.sort(BSON("_id" << 1)); - DBDirectClient client(opCtx); - auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, query); + auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, + BSONObj{}, + Query().sort(BSON("_id" << 1))); while (cursor->more()) { auto nextSession = SessionTxnRecord::parse( @@ -422,7 +422,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op DBDirectClient client(opCtx); const auto& newWriteOplogDoc = client.findOne( - NamespaceString::kRsOplogNamespace.ns(), nextOpTimeToFetch.asQuery(), nullptr); + NamespaceString::kRsOplogNamespace.ns(), nextOpTimeToFetch.asQuery(), Query(), nullptr); uassert(40620, str::stream() << "Unable to fetch oplog entry with opTime: " diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp index 8641111c168..601a1624208 100644 --- a/src/mongo/db/s/shard_key_util.cpp +++ b/src/mongo/db/s/shard_key_util.cpp @@ -227,7 +227,7 @@ void ValidationBehaviorsShardCollection::verifyCanCreateShardKeyIndex( uassert(ErrorCodes::InvalidOptions, "Please create an index that starts with the proposed shard key before " "sharding the collection", - _localClient->findOne(nss.ns(), Query()).isEmpty()); + _localClient->findOne(nss.ns(), BSONObj{}).isEmpty()); } void ValidationBehaviorsShardCollection::createShardKeyIndex( diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp index d7bf11091bf..12e25a256e5 100644 --- 
a/src/mongo/db/s/shard_metadata_util.cpp +++ b/src/mongo/db/s/shard_metadata_util.cpp @@ -137,12 +137,13 @@ StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx, StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCtx, const NamespaceString& nss) { - Query fullQuery(BSON(ShardCollectionType::kNssFieldName << nss.ns())); - try { DBDirectClient client(opCtx); std::unique_ptr<DBClientCursor> cursor = - client.query(NamespaceString::kShardConfigCollectionsNamespace, fullQuery, 1); + client.query(NamespaceString::kShardConfigCollectionsNamespace, + BSON(ShardCollectionType::kNssFieldName << nss.ns()), + Query(), + 1); if (!cursor) { return Status(ErrorCodes::OperationFailed, str::stream() << "Failed to establish a cursor for reading " @@ -165,12 +166,13 @@ StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCt } StatusWith<ShardDatabaseType> readShardDatabasesEntry(OperationContext* opCtx, StringData dbName) { - Query fullQuery(BSON(ShardDatabaseType::name() << dbName.toString())); - try { DBDirectClient client(opCtx); std::unique_ptr<DBClientCursor> cursor = - client.query(NamespaceString::kShardConfigDatabasesNamespace, fullQuery, 1); + client.query(NamespaceString::kShardConfigDatabasesNamespace, + BSON(ShardDatabaseType::name() << dbName.toString()), + Query(), + 1); if (!cursor) { return Status(ErrorCodes::OperationFailed, str::stream() << "Failed to establish a cursor for reading " @@ -292,11 +294,8 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx, try { DBDirectClient client(opCtx); - Query fullQuery(query); - fullQuery.sort(sort); - std::unique_ptr<DBClientCursor> cursor = - client.query(chunksNss, fullQuery, limit.get_value_or(0)); + client.query(chunksNss, query, Query().sort(sort), limit.get_value_or(0)); uassert(ErrorCodes::OperationFailed, str::stream() << "Failed to establish a cursor for reading " << chunksNss.ns() << " from local storage", diff --git 
a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp index 1739c32da5d..814517f17d9 100644 --- a/src/mongo/db/s/shard_metadata_util_test.cpp +++ b/src/mongo/db/s/shard_metadata_util_test.cpp @@ -139,12 +139,13 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture { try { DBDirectClient client(operationContext()); for (auto& chunk : chunks) { - Query query(BSON(ChunkType::minShardID() - << chunk.getMin() << ChunkType::max() << chunk.getMax())); - query.readPref(ReadPreference::Nearest, BSONArray()); - NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + uuid.toString()}; - std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1); + std::unique_ptr<DBClientCursor> cursor = + client.query(chunkMetadataNss, + BSON(ChunkType::minShardID() + << chunk.getMin() << ChunkType::max() << chunk.getMax()), + Query().readPref(ReadPreference::Nearest, BSONArray()), + 1); ASSERT(cursor); ASSERT(cursor->more()); diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp index 7eb847f789d..9b9fdc950ca 100644 --- a/src/mongo/db/s/sharding_ddl_util_test.cpp +++ b/src/mongo/db/s/sharding_ddl_util_test.cpp @@ -83,12 +83,14 @@ const NamespaceString kToNss("test.to"); // Query 'limit' objects from the database into an array. 
void findN(DBClientBase& client, const std::string& ns, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, std::vector<BSONObj>& out) { out.reserve(limit); std::unique_ptr<DBClientCursor> c = client.query(NamespaceString(ns), - std::move(query), + filter, + querySettings, limit, 0 /*nToSkip*/, nullptr /*fieldsToReturn*/, @@ -109,9 +111,9 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) { DBDirectClient client(opCtx); const NamespaceString fromNss("test.from"); - const auto fromCollQuery = Query(BSON(CollectionType::kNssFieldName << fromNss.ns())); + const auto fromCollQuery = BSON(CollectionType::kNssFieldName << fromNss.ns()); - const auto toCollQuery = Query(BSON(CollectionType::kNssFieldName << kToNss.ns())); + const auto toCollQuery = BSON(CollectionType::kNssFieldName << kToNss.ns()); const Timestamp collTimestamp(1); const auto collUUID = UUID::gen(); @@ -156,10 +158,13 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) { // Get FROM collection document and chunks auto fromDoc = client.findOne(CollectionType::ConfigNS.ns(), fromCollQuery); CollectionType fromCollection(fromDoc); - auto fromChunksQuery = - Query(BSON(ChunkType::collectionUUID << collUUID)).sort(BSON("_id" << 1)); std::vector<BSONObj> fromChunks; - findN(client, ChunkType::ConfigNS.ns(), fromChunksQuery, nChunks, fromChunks); + findN(client, + ChunkType::ConfigNS.ns(), + BSON(ChunkType::collectionUUID << collUUID) /*filter*/, + Query().sort(BSON("_id" << 1)), + nChunks, + fromChunks); auto fromCollType = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, fromNss); // Perform the metadata rename @@ -171,11 +176,14 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) { // Get TO collection document and chunks auto toDoc = client.findOne(CollectionType::ConfigNS.ns(), toCollQuery); - const auto toChunksQuery = - Query(BSON(ChunkType::collectionUUID << collUUID)).sort(BSON("_id" << 1)); CollectionType toCollection(toDoc); std::vector<BSONObj> toChunks; - 
findN(client, ChunkType::ConfigNS.ns(), toChunksQuery, nChunks, toChunks); + findN(client, + ChunkType::ConfigNS.ns(), + BSON(ChunkType::collectionUUID << collUUID) /*filter*/, + Query().sort(BSON("_id" << 1)), + nChunks, + toChunks); // Check that original epoch/timestamp are changed in config.collections entry ASSERT(fromCollection.getEpoch() != toCollection.getEpoch()); diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp index 5b060c8d968..924b9920b37 100644 --- a/src/mongo/db/s/transaction_coordinator_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_test.cpp @@ -137,7 +137,7 @@ protected: void waitUntilCoordinatorDocIsPresent() { DBDirectClient dbClient(operationContext()); - while (dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), Query()) + while (dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), BSONObj{}) .isEmpty()) ; } @@ -151,13 +151,14 @@ protected: do { doc = TransactionCoordinatorDocument::parse( IDLParserErrorContext("dummy"), - dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), Query())); + dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), + BSONObj{})); } while (!doc.getDecision()); } void waitUntilNoCoordinatorDocIsPresent() { DBDirectClient dbClient(operationContext()); - while (!dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), Query()) + while (!dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), BSONObj{}) .isEmpty()) ; } diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp index 0bc0d019c6c..9e07378295d 100644 --- a/src/mongo/db/s/transaction_coordinator_util.cpp +++ b/src/mongo/db/s/transaction_coordinator_util.cpp @@ -163,7 +163,7 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx, // changed since the update above ran. 
const auto doc = client.findOne( NamespaceString::kTransactionCoordinatorsNamespace.toString(), - QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON())); + BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON())); uasserted(51025, str::stream() << "While attempting to write participant list " << buildParticipantListString(participantList) << " for " @@ -375,7 +375,7 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx, // changed since the update above ran. const auto doc = client.findOne( NamespaceString::kTransactionCoordinatorsNamespace.ns(), - QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON())); + BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON())); uasserted(51026, str::stream() << "While attempting to write decision " << (isCommit ? "'commit'" : "'abort'") << " for" << lsid.getId() @@ -542,7 +542,7 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx, // changed since the update above ran. 
const auto doc = client.findOne( NamespaceString::kTransactionCoordinatorsNamespace.toString(), - QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON())); + BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON())); uasserted(51027, str::stream() << "While attempting to delete document for " << lsid.getId() << ':' << txnNumber @@ -591,7 +591,7 @@ std::vector<TransactionCoordinatorDocument> readAllCoordinatorDocs(OperationCont DBDirectClient client(opCtx); auto coordinatorDocsCursor = - client.query(NamespaceString::kTransactionCoordinatorsNamespace, Query{}); + client.query(NamespaceString::kTransactionCoordinatorsNamespace, BSONObj{}); while (coordinatorDocsCursor->more()) { // TODO (SERVER-38307): Try/catch around parsing the document and skip the document if it diff --git a/src/mongo/db/s/vector_clock_shard_server_test.cpp b/src/mongo/db/s/vector_clock_shard_server_test.cpp index 22efd956506..1a62bf34d84 100644 --- a/src/mongo/db/s/vector_clock_shard_server_test.cpp +++ b/src/mongo/db/s/vector_clock_shard_server_test.cpp @@ -292,8 +292,8 @@ protected: } }; -const Query kVectorClockQuery = QUERY("_id" - << "vectorClockState"); +const BSONObj kVectorClockQuery = BSON("_id" + << "vectorClockState"); TEST_F(VectorClockPersistenceTest, PrimaryPersistVectorClockDocument) { auto sc = getServiceContext(); diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp index 7fa62f5363a..075e5bda2cc 100644 --- a/src/mongo/db/session_catalog_mongod.cpp +++ b/src/mongo/db/session_catalog_mongod.cpp @@ -239,9 +239,9 @@ void createRetryableFindAndModifyTable(OperationContext* opCtx) { void abortInProgressTransactions(OperationContext* opCtx) { DBDirectClient client(opCtx); - Query query(BSON(SessionTxnRecord::kStateFieldName - << DurableTxnState_serializer(DurableTxnStateEnum::kInProgress))); - auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, query); + auto cursor = 
client.query(NamespaceString::kSessionTransactionsTableNamespace, + BSON(SessionTxnRecord::kStateFieldName << DurableTxnState_serializer( + DurableTxnStateEnum::kInProgress))); if (cursor->more()) { LOGV2_DEBUG(21977, 3, "Aborting in-progress transactions on stepup."); } @@ -428,12 +428,12 @@ int MongoDSessionCatalog::reapSessionsOlderThan(OperationContext* opCtx, // Scan for records older than the minimum lifetime and uses a sort to walk the '_id' index DBDirectClient client(opCtx); - auto cursor = - client.query(NamespaceString::kSessionTransactionsTableNamespace, - Query(BSON(kLastWriteDateFieldName << LT << possiblyExpired)).sort(kSortById), - 0, - 0, - &kIdProjection); + auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, + BSON(kLastWriteDateFieldName << LT << possiblyExpired), + Query().sort(kSortById), + 0, + 0, + &kIdProjection); // The max batch size is chosen so that a single batch won't exceed the 16MB BSON object size // limit diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp index a11860b17e6..a9cfe387ad9 100644 --- a/src/mongo/db/transaction_participant.cpp +++ b/src/mongo/db/transaction_participant.cpp @@ -140,9 +140,8 @@ ActiveTransactionHistory fetchActiveTransactionHistory(OperationContext* opCtx, // field has been set, bumping the global lock acquisition to an IX. That upconvert would // require a flow control ticket to be obtained. 
FlowControl::Bypass flowControlBypass(opCtx); - auto result = - client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(), - {BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON())}); + auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(), + BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON())); if (result.isEmpty()) { return boost::none; } diff --git a/src/mongo/db/transaction_participant_retryable_writes_test.cpp b/src/mongo/db/transaction_participant_retryable_writes_test.cpp index 5b4ce8b1d49..c2582f6ba60 100644 --- a/src/mongo/db/transaction_participant_retryable_writes_test.cpp +++ b/src/mongo/db/transaction_participant_retryable_writes_test.cpp @@ -258,7 +258,7 @@ protected: DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << session->getSessionId().toBSON())}); + BSON("_id" << session->getSessionId().toBSON())); ASSERT(cursor); ASSERT(cursor->more()); @@ -296,7 +296,7 @@ TEST_F(TransactionParticipantRetryableWritesTest, SessionEntryNotWrittenOnBegin) DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << sessionId.toBSON())}); + BSON("_id" << sessionId.toBSON())); ASSERT(cursor); ASSERT(!cursor->more()); } @@ -313,7 +313,7 @@ TEST_F(TransactionParticipantRetryableWritesTest, SessionEntryWrittenAtFirstWrit DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << sessionId.toBSON())}); + BSON("_id" << sessionId.toBSON())); ASSERT(cursor); ASSERT(cursor->more()); @@ -339,7 +339,7 @@ TEST_F(TransactionParticipantRetryableWritesTest, DBDirectClient client(opCtx()); auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, - {BSON("_id" << sessionId.toBSON())}); + BSON("_id" << sessionId.toBSON())); ASSERT(cursor); ASSERT(cursor->more()); diff --git 
a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp index 157b0d32593..16ecec730c9 100644 --- a/src/mongo/db/vector_clock_mongod.cpp +++ b/src/mongo/db/vector_clock_mongod.cpp @@ -330,7 +330,7 @@ Future<void> VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser NamespaceString::kVectorClockNamespace); store.forEach( opCtx, - QUERY(VectorClockDocument::k_idFieldName << durableVectorClock.get_id()), + BSON(VectorClockDocument::k_idFieldName << durableVectorClock.get_id()), [&, numDocsFound = 0](const auto& doc) mutable { invariant(++numDocsFound == 1); durableVectorClock = doc; @@ -348,7 +348,7 @@ Future<void> VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser PersistentTaskStore<VectorClockDocument> store(NamespaceString::kVectorClockNamespace); store.upsert(opCtx, - QUERY(VectorClockDocument::k_idFieldName << vcd.get_id()), + BSON(VectorClockDocument::k_idFieldName << vcd.get_id()), vcd.toBSON(), WriteConcerns::kMajorityWriteConcern); return vectorTime; diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp index 5da71a03c53..7519baada25 100644 --- a/src/mongo/dbtests/clienttests.cpp +++ b/src/mongo/dbtests/clienttests.cpp @@ -165,7 +165,7 @@ public: ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("a" << 1 << "b" << 1))); unique_ptr<DBClientCursor> c = - db.query(NamespaceString(ns()), Query().sort(BSON("a" << 1 << "b" << 1))); + db.query(NamespaceString(ns()), BSONObj{}, Query().sort(BSON("a" << 1 << "b" << 1))); ASSERT_EQUALS(1111, c->itcount()); } }; @@ -183,7 +183,7 @@ public: } unique_ptr<DBClientCursor> c = - db.query(NamespaceString(ns()), Query().sort(BSON("i" << 1))); + db.query(NamespaceString(ns()), BSONObj{}, Query().sort(BSON("i" << 1))); BSONObj o = c->next(); ASSERT(c->more()); diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp index 0603c45c998..1cb045a381d 100644 --- a/src/mongo/dbtests/dbhelper_tests.cpp +++ 
b/src/mongo/dbtests/dbhelper_tests.cpp @@ -84,7 +84,7 @@ private: BSONArray docs(OperationContext* opCtx) const { DBDirectClient client(opCtx); unique_ptr<DBClientCursor> cursor = - client.query(NamespaceString(ns), Query().hint(BSON("_id" << 1))); + client.query(NamespaceString(ns), BSONObj{}, Query().hint(BSON("_id" << 1))); BSONArrayBuilder bab; while (cursor->more()) { bab << cursor->next(); diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp index a10aa4ee2c9..f3337944b76 100644 --- a/src/mongo/dbtests/directclienttests.cpp +++ b/src/mongo/dbtests/directclienttests.cpp @@ -93,7 +93,7 @@ public: OperationContext& opCtx = *opCtxPtr; DBDirectClient client(&opCtx); - ASSERT_THROWS_CODE(client.query(NamespaceString(), Query(), 1)->nextSafe(), + ASSERT_THROWS_CODE(client.query(NamespaceString(), BSONObj{}, Query(), 1)->nextSafe(), AssertionException, ErrorCodes::InvalidNamespace); } @@ -130,7 +130,8 @@ public: OperationContext& opCtx = *opCtxPtr; DBDirectClient client(&opCtx); - auto response = client.updateAcknowledged("", Query(), BSON("$set" << BSON("x" << 1))); + auto response = + client.updateAcknowledged("", BSONObj{} /*filter*/, BSON("$set" << BSON("x" << 1))); ASSERT_EQ(ErrorCodes::InvalidNamespace, getStatusFromCommandResult(response)); } }; @@ -142,7 +143,7 @@ public: OperationContext& opCtx = *opCtxPtr; DBDirectClient client(&opCtx); - auto response = client.removeAcknowledged("", Query()); + auto response = client.removeAcknowledged("", BSONObj{} /*filter*/); ASSERT_EQ(ErrorCodes::InvalidNamespace, getStatusFromCommandResult(response)); } }; diff --git a/src/mongo/dbtests/logical_sessions_tests.cpp b/src/mongo/dbtests/logical_sessions_tests.cpp index cb1ea6e14b6..335485881e7 100644 --- a/src/mongo/dbtests/logical_sessions_tests.cpp +++ b/src/mongo/dbtests/logical_sessions_tests.cpp @@ -57,14 +57,13 @@ Status insertRecord(OperationContext* opCtx, LogicalSessionRecord record) { return 
getStatusFromWriteCommandReply(response); } -BSONObj lsidQuery(const LogicalSessionId& lsid) { - return BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON()); -} - StatusWith<LogicalSessionRecord> fetchRecord(OperationContext* opCtx, const LogicalSessionId& lsid) { DBDirectClient client(opCtx); - auto cursor = client.query(NamespaceString(kTestNS), lsidQuery(lsid), 1); + auto cursor = client.query(NamespaceString(kTestNS), + BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON()), + Query(), + 1); if (!cursor->more()) { return {ErrorCodes::NoSuchSession, "No matching record in the sessions collection"}; } diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp index 07e5a88d7e3..e08ab6c84f7 100644 --- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp +++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp @@ -100,7 +100,8 @@ std::pair<rpc::UniqueReply, DBClientBase*> MockDBClientConnection::runCommandWit std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query( const NamespaceStringOrUUID& nsOrUuid, - mongo::Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, @@ -112,7 +113,8 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query( try { mongo::BSONArray result(_remoteServer->query(_remoteServerInstanceID, nsOrUuid, - query, + filter, + querySettings, limit, nToSkip, fieldsToReturn, @@ -125,15 +127,15 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query( // A simple mock implementation of a resumable query, where we skip the first 'n' fields // where 'n' is given by the mock resume token. 
auto nToSkip = 0; - auto queryBson = fromjson(query.toString()); - if (queryBson.hasField("$_resumeAfter")) { - if (queryBson["$_resumeAfter"].Obj().hasField("n")) { - nToSkip = queryBson["$_resumeAfter"]["n"].numberInt(); + BSONObj querySettingsAsBSON = querySettings.getFullSettingsDeprecated(); + if (querySettingsAsBSON.hasField("$_resumeAfter")) { + if (querySettingsAsBSON["$_resumeAfter"].Obj().hasField("n")) { + nToSkip = querySettingsAsBSON["$_resumeAfter"]["n"].numberInt(); } } bool provideResumeToken = false; - if (queryBson.hasField("$_requestResumeToken")) { + if (querySettingsAsBSON.hasField("$_requestResumeToken")) { provideResumeToken = true; } @@ -179,13 +181,20 @@ mongo::ConnectionString::ConnectionType MockDBClientConnection::type() const { unsigned long long MockDBClientConnection::query( std::function<void(mongo::DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, - mongo::Query query, + const BSONObj& filter, + const Query& querySettings, const mongo::BSONObj* fieldsToReturn, int queryOptions, int batchSize, boost::optional<BSONObj> readConcernObj) { - return DBClientBase::query( - f, nsOrUuid, query, fieldsToReturn, queryOptions, batchSize, readConcernObj); + return DBClientBase::query(f, + nsOrUuid, + filter, + querySettings, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj); } uint64_t MockDBClientConnection::getSockCreationMicroSec() const { @@ -209,10 +218,10 @@ void MockDBClientConnection::insert(const string& ns, } void MockDBClientConnection::remove(const string& ns, - Query query, + const BSONObj& filter, bool removeMany, boost::optional<BSONObj> writeConcernObj) { - _remoteServer->remove(ns, std::move(query)); + _remoteServer->remove(ns, filter); } void MockDBClientConnection::killCursor(const NamespaceString& ns, long long cursorID) { diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h index 2dc55f21bc5..3160f34e060 100644 --- 
a/src/mongo/dbtests/mock/mock_dbclient_connection.h +++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h @@ -122,7 +122,8 @@ public: std::unique_ptr<mongo::DBClientCursor> query( const NamespaceStringOrUUID& nsOrUuid, - mongo::Query query = mongo::Query(), + const BSONObj& filter = BSONObj{}, + const Query& querySettings = Query(), int limit = 0, int nToSkip = 0, const mongo::BSONObj* fieldsToReturn = nullptr, @@ -143,7 +144,7 @@ public: boost::optional<BSONObj> writeConcernObj = boost::none) override; void remove(const std::string& ns, - Query query, + const BSONObj& filter, bool removeMany = true, boost::optional<BSONObj> writeConcernObj = boost::none) override; @@ -183,7 +184,8 @@ public: unsigned long long query(std::function<void(mongo::DBClientCursorBatchIterator&)> f, const NamespaceStringOrUUID& nsOrUuid, - mongo::Query query, + const BSONObj& filter, + const Query& querySettings, const mongo::BSONObj* fieldsToReturn = nullptr, int queryOptions = 0, int batchSize = 0, diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp index 49d3c2cae84..7b7031aa9a4 100644 --- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp +++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp @@ -131,7 +131,7 @@ void MockRemoteDBServer::insert(const string& ns, BSONObj obj) { mockCollection.push_back(obj.copy()); } -void MockRemoteDBServer::remove(const string& ns, Query) { +void MockRemoteDBServer::remove(const string& ns, const BSONObj&) { scoped_spinlock sLock(_lock); if (_dataMgr.count(ns) == 0) { return; @@ -197,7 +197,8 @@ BSONObj MockRemoteDBServer::project(projection_executor::ProjectionExecutor* pro mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id, const NamespaceStringOrUUID& nsOrUuid, - mongo::Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h 
b/src/mongo/dbtests/mock/mock_remote_db_server.h index 4ca3e1d8cd2..7ae742f769c 100644 --- a/src/mongo/dbtests/mock/mock_remote_db_server.h +++ b/src/mongo/dbtests/mock/mock_remote_db_server.h @@ -145,9 +145,9 @@ public: * Removes documents from this server. * * @param ns the namespace to remove documents from. - * @param query ignored. + * @param filter ignored. */ - void remove(const std::string& ns, Query query); + void remove(const std::string& ns, const BSONObj& filter); /** * Assign a UUID to a collection @@ -164,7 +164,8 @@ public: mongo::BSONArray query(InstanceID id, const NamespaceStringOrUUID& nsOrUuid, - mongo::Query query = mongo::Query(), + const BSONObj& filter, + const Query& querySettings, int limit = 0, int nToSkip = 0, const mongo::BSONObj* fieldsToReturn = nullptr, diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp index 5ba957bf72a..0aeedceb720 100644 --- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp +++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp @@ -258,7 +258,7 @@ TEST(MockDBClientConnTest, SimpleRemove) { { MockDBClientConnection conn(&server); - conn.remove(ns, Query()); + conn.remove(ns, BSONObj{} /*filter*/); } { @@ -303,7 +303,7 @@ TEST(MockDBClientConnTest, MultiNSRemove) { { MockDBClientConnection conn(&server); - conn.remove(ns2, Query()); + conn.remove(ns2, BSONObj{} /*filter*/); std::unique_ptr<mongo::DBClientCursor> cursor = conn.query(NamespaceString(ns2)); ASSERT(!cursor->more()); @@ -351,7 +351,7 @@ TEST(MockDBClientConnTest, InsertAfterRemove) { { MockDBClientConnection conn(&server); - conn.remove(ns, Query()); + conn.remove(ns, BSONObj{} /*filter*/); } { @@ -618,6 +618,7 @@ TEST(MockDBClientConnTest, SimulateCallAndRecvResponses) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -688,7 +689,7 @@ TEST(MockDBClientConnTest, SimulateCallErrors) { MockDBClientConnection conn(&server); mongo::DBClientCursor 
cursor( - &conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, 0, 0, nullptr, 0, 0); + &conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, Query(), 0, 0, nullptr, 0, 0); // Test network exception and error response for the initial find. MockDBClientConnection::Responses callResponses = { @@ -738,6 +739,7 @@ TEST(MockDBClientConnTest, SimulateRecvErrors) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -786,6 +788,7 @@ TEST(MockDBClientConnTest, BlockingNetwork) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -847,6 +850,7 @@ TEST(MockDBClientConnTest, ShutdownServerBeforeCall) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -872,6 +876,7 @@ TEST(MockDBClientConnTest, ShutdownServerAfterCall) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -903,6 +908,7 @@ TEST(MockDBClientConnTest, ConnectionAutoReconnect) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -935,6 +941,7 @@ TEST(MockDBClientConnTest, ShutdownServerBeforeRecv) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, @@ -960,6 +967,7 @@ TEST(MockDBClientConnTest, ShutdownServerAfterRecv) { mongo::DBClientCursor cursor(&conn, mongo::NamespaceStringOrUUID(nss), BSONObj{}, + Query(), 0, 0, nullptr, diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index a35ad787320..c6d3ae11a2c 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -282,9 +282,9 @@ public: BSONObjBuilder a; a.appendMaxKey("$lt"); BSONObj limit = a.done(); - ASSERT(!_client.findOne(ns, QUERY("a" << limit)).isEmpty()); + ASSERT(!_client.findOne(ns, BSON("a" << limit)).isEmpty()); 
ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); - ASSERT(!_client.findOne(ns, QUERY("a" << limit).hint(BSON("a" << 1))).isEmpty()); + ASSERT(!_client.findOne(ns, BSON("a" << limit), Query().hint(BSON("a" << 1))).isEmpty()); } }; @@ -299,7 +299,7 @@ public: insert(ns, BSON("a" << 2)); insert(ns, BSON("a" << 3)); unique_ptr<DBClientCursor> cursor = - _client.query(NamespaceString(ns), BSONObj(), 0, 0, nullptr, 0, 2); + _client.query(NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, 0, 2); long long cursorId = cursor->getCursorId(); cursor->decouple(); cursor.reset(); @@ -336,7 +336,7 @@ public: // Create a cursor on the collection, with a batch size of 200. unique_ptr<DBClientCursor> cursor = - _client.query(NamespaceString(ns), "", 0, 0, nullptr, 0, 200); + _client.query(NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, 0, 200); // Count 500 results, spanning a few batches of documents. for (int i = 0; i < 500; ++i) { @@ -382,7 +382,7 @@ public: // Create a cursor on the collection, with a batch size of 200. unique_ptr<DBClientCursor> cursor = - _client.query(NamespaceString(ns), "", 0, 0, nullptr, 0, 200); + _client.query(NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, 0, 200); CursorId cursorId = cursor->getCursorId(); // Count 500 results, spanning a few batches of documents. 
@@ -424,19 +424,22 @@ public: } void testLimit(int limit) { - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), limit)->itcount(), limit); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), limit)->itcount(), + limit); } void run() { for (int i = 0; i < 1000; i++) insert(ns, BSON(GENOID << "i" << i)); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 1)->itcount(), 1); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 10)->itcount(), 10); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 101)->itcount(), 101); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 999)->itcount(), 999); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 1000)->itcount(), 1000); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 1001)->itcount(), 1000); - ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj(), 0)->itcount(), 1000); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 1)->itcount(), 1); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 10)->itcount(), 10); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 101)->itcount(), 101); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 999)->itcount(), 999); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 1000)->itcount(), + 1000); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 1001)->itcount(), + 1000); + ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 0)->itcount(), 1000); } }; @@ -457,6 +460,7 @@ public: insert(ns, BSON("a" << 1)); insert(ns, BSON("a" << 2)); unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns), + BSONObj{}, Query().hint(BSON("$natural" << 1)), 0, 0, @@ -490,6 +494,7 @@ public: const char* ns = "unittests.querytests.EmptyTail"; _client.createCollection(ns, 1900, true); unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns), + BSONObj{}, 
Query().hint(BSON("$natural" << 1)), 2, 0, @@ -499,7 +504,8 @@ public: ASSERT(c->isDead()); insert(ns, BSON("a" << 0)); c = _client.query(NamespaceString(ns), - QUERY("a" << 1).hint(BSON("$natural" << 1)), + BSON("a" << 1), + Query().hint(BSON("$natural" << 1)), 2, 0, nullptr, @@ -525,6 +531,7 @@ public: insert(ns, BSON("a" << 0)); insert(ns, BSON("a" << 1)); unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns), + BSONObj{}, Query().hint(BSON("$natural" << 1)), 2, 0, @@ -557,6 +564,7 @@ public: insert(ns, BSON("a" << 0)); insert(ns, BSON("a" << 1)); unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns), + BSONObj{}, Query().hint(BSON("$natural" << 1)), 2, 0, @@ -591,6 +599,7 @@ public: insert(ns, BSON("a" << 0)); insert(ns, BSON("a" << 1)); unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns), + BSONObj{}, Query().hint(BSON("$natural" << 1)), 0, 0, @@ -617,7 +626,7 @@ public: _client.insert(ns, BSONObj()); ASSERT_THROWS( _client.query( - NamespaceString(ns), BSONObj(), 0, 0, nullptr, QueryOption_CursorTailable), + NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, QueryOption_CursorTailable), AssertionException); } }; @@ -651,12 +660,18 @@ public: info); insertA(ns, 0); insertA(ns, 1); - unique_ptr<DBClientCursor> c1 = _client.query( - NamespaceString(ns), QUERY("a" << GT << -1), 0, 0, nullptr, QueryOption_CursorTailable); + unique_ptr<DBClientCursor> c1 = _client.query(NamespaceString(ns), + BSON("a" << GT << -1), + Query(), + 0, + 0, + nullptr, + QueryOption_CursorTailable); OID id; id.init("000000000000000000000000"); unique_ptr<DBClientCursor> c2 = _client.query(NamespaceString(ns), - QUERY("value" << GT << id), + BSON("value" << GT << id), + Query(), 0, 0, nullptr, @@ -709,19 +724,20 @@ public: insert(ns, BSON("ts" << Timestamp(1000, 0))); insert(ns, BSON("ts" << Timestamp(1000, 1))); insert(ns, BSON("ts" << Timestamp(1000, 2))); - unique_ptr<DBClientCursor> c = - _client.query(NamespaceString(ns), - QUERY("ts" << GT << 
Timestamp(1000, 1)).hint(BSON("$natural" << 1)), - 0, - 0, - nullptr); + unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns), + BSON("ts" << GT << Timestamp(1000, 1)), + Query().hint(BSON("$natural" << 1)), + 0, + 0, + nullptr); ASSERT(c->more()); ASSERT_EQUALS(2u, c->next()["ts"].timestamp().getInc()); ASSERT(!c->more()); insert(ns, BSON("ts" << Timestamp(1000, 3))); c = _client.query(NamespaceString(ns), - QUERY("ts" << GT << Timestamp(1000, 1)).hint(BSON("$natural" << 1)), + BSON("ts" << GT << Timestamp(1000, 1)), + Query().hint(BSON("$natural" << 1)), 0, 0, nullptr); @@ -834,11 +850,11 @@ public: } void run() { const char* ns = "unittests.querytests._UnderscoreNs"; - ASSERT(_client.findOne(ns, "{}").isEmpty()); + ASSERT(_client.findOne(ns, BSONObj{}).isEmpty()); auto response = _client.insertAcknowledged(ns, {BSON("a" << 1)}); ASSERT_OK(getStatusFromWriteCommandReply(response)); ASSERT_EQ(1, response["n"].Int()); - ASSERT_EQUALS(1, _client.findOne(ns, "{}").getIntField("a")); + ASSERT_EQUALS(1, _client.findOne(ns, BSONObj{}).getIntField("a")); } }; @@ -850,9 +866,9 @@ public: void run() { const char* ns = "unittests.querytests.EmptyFieldSpec"; _client.insert(ns, BSON("a" << 1)); - ASSERT(!_client.findOne(ns, "").isEmpty()); + ASSERT(!_client.findOne(ns, BSONObj{}).isEmpty()); BSONObj empty; - ASSERT(!_client.findOne(ns, "", &empty).isEmpty()); + ASSERT(!_client.findOne(ns, BSONObj{}, Query(), &empty).isEmpty()); } }; @@ -997,7 +1013,9 @@ public: _client.insert(ns, fromjson("{a:[1,2,3]}")); ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); ASSERT(_client - .query(NamespaceString(ns), QUERY("a" << mongo::BSIZE << 3).hint(BSON("a" << 1))) + .query(NamespaceString(ns), + BSON("a" << mongo::BSIZE << 3), + Query().hint(BSON("a" << 1))) ->more()); } }; @@ -1010,13 +1028,17 @@ public: void run() { const char* ns = "unittests.querytests.IndexedArray"; _client.insert(ns, fromjson("{a:[1,2,3]}")); - ASSERT(_client.query(NamespaceString(ns), 
Query("{a:[1,2,3]}"))->more()); + ASSERT(_client.query(NamespaceString(ns), fromjson("{a:[1,2,3]}"))->more()); ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); + ASSERT(_client + .query(NamespaceString(ns), + fromjson("{a:{$in:[1,[1,2,3]]}}"), + Query().hint(BSON("a" << 1))) + ->more()); ASSERT( - _client.query(NamespaceString(ns), Query("{a:{$in:[1,[1,2,3]]}}").hint(BSON("a" << 1))) + _client + .query(NamespaceString(ns), fromjson("{a:[1,2,3]}"), Query().hint(BSON("a" << 1))) ->more()); - ASSERT( - _client.query(NamespaceString(ns), Query("{a:[1,2,3]}").hint(BSON("a" << 1)))->more()); } }; @@ -1036,14 +1058,23 @@ public: private: void check(const string& hintField) { const char* ns = "unittests.querytests.InsideArray"; - ASSERT(_client.query(NamespaceString(ns), Query("{a:[[1],2]}").hint(BSON(hintField << 1))) - ->more()); - ASSERT(_client.query(NamespaceString(ns), Query("{a:[1]}").hint(BSON(hintField << 1))) + ASSERT(_client + .query(NamespaceString(ns), + fromjson("{a:[[1],2]}"), + Query().hint(BSON(hintField << 1))) ->more()); ASSERT( - _client.query(NamespaceString(ns), Query("{a:2}").hint(BSON(hintField << 1)))->more()); + _client + .query(NamespaceString(ns), fromjson("{a:[1]}"), Query().hint(BSON(hintField << 1))) + ->more()); + ASSERT( + _client + .query(NamespaceString(ns), fromjson("{a:2}"), Query().hint(BSON(hintField << 1))) + ->more()); ASSERT( - !_client.query(NamespaceString(ns), Query("{a:1}").hint(BSON(hintField << 1)))->more()); + !_client + .query(NamespaceString(ns), fromjson("{a:1}"), Query().hint(BSON(hintField << 1))) + ->more()); } }; @@ -1057,10 +1088,11 @@ public: _client.insert(ns, fromjson("{'_id':1,a:[1]}")); _client.insert(ns, fromjson("{'_id':2,a:[[1]]}")); ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1))); - ASSERT_EQUALS(1, - _client.query(NamespaceString(ns), Query("{a:[1]}").hint(BSON("a" << 1))) - ->next() - .getIntField("_id")); + ASSERT_EQUALS( + 1, + _client.query(NamespaceString(ns), 
fromjson("{a:[1]}"), Query().hint(BSON("a" << 1))) + ->next() + .getIntField("_id")); } }; @@ -1080,67 +1112,19 @@ public: private: void check(const string& hintField) { const char* ns = "unittests.querytests.SubobjArr"; - ASSERT(_client.query(NamespaceString(ns), Query("{'a.b':1}").hint(BSON(hintField << 1))) + ASSERT(_client + .query(NamespaceString(ns), + fromjson("{'a.b':1}"), + Query().hint(BSON(hintField << 1))) ->more()); - ASSERT(_client.query(NamespaceString(ns), Query("{'a.b':[1]}").hint(BSON(hintField << 1))) + ASSERT(_client + .query(NamespaceString(ns), + fromjson("{'a.b':[1]}"), + Query().hint(BSON(hintField << 1))) ->more()); } }; -class MinMax : public ClientBase { -public: - MinMax() : ns("unittests.querytests.MinMax") {} - ~MinMax() { - _client.dropCollection("unittests.querytests.MinMax"); - } - void run() { - ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1 << "b" << 1))); - _client.insert(ns, BSON("a" << 1 << "b" << 1)); - _client.insert(ns, BSON("a" << 1 << "b" << 2)); - _client.insert(ns, BSON("a" << 2 << "b" << 1)); - _client.insert(ns, BSON("a" << 2 << "b" << 2)); - - ASSERT_EQUALS(4, count(_client.query(NamespaceString(ns), BSONObj()))); - BSONObj hint = BSON("a" << 1 << "b" << 1); - check(0, 0, 3, 3, 4, hint); - check(1, 1, 2, 2, 3, hint); - check(1, 2, 2, 2, 2, hint); - check(1, 2, 2, 1, 1, hint); - - unique_ptr<DBClientCursor> c = query(1, 2, 2, 2, hint); - BSONObj obj = c->next(); - ASSERT_EQUALS(1, obj.getIntField("a")); - ASSERT_EQUALS(2, obj.getIntField("b")); - obj = c->next(); - ASSERT_EQUALS(2, obj.getIntField("a")); - ASSERT_EQUALS(1, obj.getIntField("b")); - ASSERT(!c->more()); - } - -private: - unique_ptr<DBClientCursor> query(int minA, int minB, int maxA, int maxB, const BSONObj& hint) { - Query q; - q = q.minKey(BSON("a" << minA << "b" << minB)).maxKey(BSON("a" << maxA << "b" << maxB)); - q.hint(hint); - return _client.query(NamespaceString(ns), q); - } - void check( - int minA, int minB, int maxA, int maxB, int 
expectedCount, const BSONObj& hint = empty_) { - ASSERT_EQUALS(expectedCount, count(query(minA, minB, maxA, maxB, hint))); - } - int count(unique_ptr<DBClientCursor> c) { - int ret = 0; - while (c->more()) { - ++ret; - c->next(); - } - return ret; - } - const char* ns; - static BSONObj empty_; -}; -BSONObj MinMax::empty_; - class MatchCodeCodeWScope : public ClientBase { public: MatchCodeCodeWScope() : _ns("unittests.querytests.MatchCodeCodeWScope"), _nss(_ns) {} @@ -1263,7 +1247,8 @@ public: _client.dropCollection("unittests.querytests.DifferentNumbers"); } void t(const char* ns) { - unique_ptr<DBClientCursor> cursor = _client.query(NamespaceString(ns), Query().sort("7")); + unique_ptr<DBClientCursor> cursor = + _client.query(NamespaceString(ns), BSONObj{}, Query().sort("7")); while (cursor->more()) { BSONObj o = cursor->next(); verify(o.valid()); @@ -1350,7 +1335,7 @@ public: b.append("z", 17); _client.insert(ns(), b.obj()); } - ASSERT_EQUALS(17, _client.findOne(ns(), BSONObj())["z"].number()); + ASSERT_EQUALS(17, _client.findOne(ns(), BSONObj{})["z"].number()); { BSONObjBuilder b; b.appendSymbol("x", "eliot"); @@ -1360,14 +1345,16 @@ public: _client .findOne(ns(), BSON("x" - << "eliot"))["z"] + << "eliot"), + Query())["z"] .number()); ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("x" << 1))); ASSERT_EQUALS(17, _client .findOne(ns(), BSON("x" - << "eliot"))["z"] + << "eliot"), + Query())["z"] .number()); } }; @@ -1407,13 +1394,13 @@ public: int a = count(); - unique_ptr<DBClientCursor> c = - _client.query(NamespaceString(ns()), - QUERY("i" << GT << 0).hint(BSON("$natural" << 1)), - 0, - 0, - nullptr, - QueryOption_CursorTailable); + unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns()), + BSON("i" << GT << 0), + Query().hint(BSON("$natural" << 1)), + 0, + 0, + nullptr, + QueryOption_CursorTailable); int n = 0; while (c->more()) { BSONObj z = c->next(); @@ -1577,13 +1564,19 @@ public: for (int k = 0; k < 5; ++k) { _client.insert(ns(), BSON("ts" 
<< Timestamp(1000, i++))); unsigned min = - _client.query(NamespaceString(ns()), Query().sort(BSON("$natural" << 1))) + _client + .query(NamespaceString(ns()), BSONObj{}, Query().sort(BSON("$natural" << 1))) ->next()["ts"] .timestamp() .getInc(); for (unsigned j = -1; j < i; ++j) { - unique_ptr<DBClientCursor> c = _client.query( - NamespaceString(ns()), QUERY("ts" << GTE << Timestamp(1000, j)), 0, 0, nullptr); + unique_ptr<DBClientCursor> c = + _client.query(NamespaceString(ns()), + BSON("ts" << GTE << Timestamp(1000, j)), + Query(), + 0, + 0, + nullptr); ASSERT(c->more()); BSONObj next = c->next(); ASSERT(!next["ts"].eoo()); @@ -1636,13 +1629,19 @@ public: for (int k = 0; k < 5; ++k) { _client.insert(ns(), BSON("ts" << Timestamp(1000, i++))); unsigned min = - _client.query(NamespaceString(ns()), Query().sort(BSON("$natural" << 1))) + _client + .query(NamespaceString(ns()), BSONObj{}, Query().sort(BSON("$natural" << 1))) ->next()["ts"] .timestamp() .getInc(); for (unsigned j = -1; j < i; ++j) { - unique_ptr<DBClientCursor> c = _client.query( - NamespaceString(ns()), QUERY("ts" << GTE << Timestamp(1000, j)), 0, 0, nullptr); + unique_ptr<DBClientCursor> c = + _client.query(NamespaceString(ns()), + BSON("ts" << GTE << Timestamp(1000, j)), + Query(), + 0, + 0, + nullptr); ASSERT(c->more()); BSONObj next = c->next(); ASSERT(!next["ts"].eoo()); @@ -1677,7 +1676,8 @@ public: // Check oplog replay mode with missing collection. unique_ptr<DBClientCursor> c0 = _client.query(NamespaceString("local.oplog.querytests.missing"), - QUERY("ts" << GTE << Timestamp(1000, 50)), + BSON("ts" << GTE << Timestamp(1000, 50)), + Query(), 0, 0, nullptr); @@ -1704,15 +1704,23 @@ public: } // Check oplog replay mode with empty collection. 
- unique_ptr<DBClientCursor> c = _client.query( - NamespaceString(ns()), QUERY("ts" << GTE << Timestamp(1000, 50)), 0, 0, nullptr); + unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns()), + BSON("ts" << GTE << Timestamp(1000, 50)), + Query(), + 0, + 0, + nullptr); ASSERT(!c->more()); // Check with some docs in the collection. for (int i = 100; i < 150; _client.insert(ns(), BSON("ts" << Timestamp(1000, i++)))) ; - c = _client.query( - NamespaceString(ns()), QUERY("ts" << GTE << Timestamp(1000, 50)), 0, 0, nullptr); + c = _client.query(NamespaceString(ns()), + BSON("ts" << GTE << Timestamp(1000, 50)), + Query(), + 0, + 0, + nullptr); ASSERT(c->more()); ASSERT_EQUALS(100u, c->next()["ts"].timestamp().getInc()); @@ -1761,7 +1769,7 @@ public: insert(ns(), BSON("a" << 2)); insert(ns(), BSON("a" << 3)); unique_ptr<DBClientCursor> cursor = - _client.query(NamespaceStringOrUUID("unittests", *coll_opts.uuid), BSONObj()); + _client.query(NamespaceStringOrUUID("unittests", *coll_opts.uuid), BSONObj{}); ASSERT_EQUALS(string(ns()), cursor->getns()); for (int i = 1; i <= 3; ++i) { ASSERT(cursor->more()); @@ -1918,7 +1926,7 @@ public: // With five results and a batch size of 5, a cursor is created since we don't know // there are no more results. std::unique_ptr<DBClientCursor> c = - _client.query(NamespaceString(ns()), Query(), 0, 0, nullptr, 0, 5); + _client.query(NamespaceString(ns()), BSONObj{}, Query(), 0, 0, nullptr, 0, 5); ASSERT(c->more()); ASSERT_NE(0, c->getCursorId()); for (int i = 0; i < 5; ++i) { @@ -1930,23 +1938,14 @@ public: { // With a batchsize of 6 we know there are no more results so we don't create a // cursor. 
- std::unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns()), Query(), 6); + std::unique_ptr<DBClientCursor> c = + _client.query(NamespaceString(ns()), BSONObj{}, Query(), 6); ASSERT(c->more()); ASSERT_EQ(0, c->getCursorId()); } } }; -namespace queryobjecttests { -class names1 { -public: - void run() { - ASSERT_BSONOBJ_EQ(BSON("x" << 1), QUERY("query" << BSON("x" << 1)).getFilter()); - ASSERT_BSONOBJ_EQ(BSON("x" << 1), QUERY("$query" << BSON("x" << 1)).getFilter()); - } -}; -} // namespace queryobjecttests - class OrderingTest { public: void run() { @@ -2012,7 +2011,6 @@ public: add<InsideArray>(); add<IndexInsideArrayCorrect>(); add<SubobjArr>(); - add<MinMax>(); add<MatchCodeCodeWScope>(); add<MatchDBRefType>(); add<DirectLocking>(); @@ -2031,7 +2029,6 @@ public: add<CountByUUID>(); add<GetDatabaseInfosTest>(); add<QueryReadsAll>(); - add<queryobjecttests::names1>(); add<OrderingTest>(); add<WhatsMySni>(); } diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp index d33376da198..e908bee2c7f 100644 --- a/src/mongo/dbtests/repltests.cpp +++ b/src/mongo/dbtests/repltests.cpp @@ -183,13 +183,6 @@ protected: void checkOne(const BSONObj& o) const { check(o, one(o)); } - void checkAll(const BSONObj& o) const { - unique_ptr<DBClientCursor> c = _client.query(NamespaceString(ns()), o); - verify(c->more()); - while (c->more()) { - check(o, c->next()); - } - } void check(const BSONObj& expected, const BSONObj& got) const { if (expected.woCompare(got)) { LOGV2(22500, @@ -219,14 +212,14 @@ protected: return count; } int opCount() { - return DBDirectClient(&_opCtx).query(NamespaceString(cllNS()), BSONObj())->itcount(); + return DBDirectClient(&_opCtx).query(NamespaceString(cllNS()), BSONObj{})->itcount(); } void applyAllOperations() { Lock::GlobalWrite lk(&_opCtx); vector<BSONObj> ops; { DBDirectClient db(&_opCtx); - auto cursor = db.query(NamespaceString(cllNS()), BSONObj()); + auto cursor = db.query(NamespaceString(cllNS()), BSONObj{}); 
while (cursor->more()) { ops.push_back(cursor->nextSafe()); } @@ -381,10 +374,10 @@ public: b.append("a", 1); b.appendTimestamp("t"); _client.insert(ns(), b.done()); - date_ = _client.findOne(ns(), QUERY("a" << 1)).getField("t").date(); + date_ = _client.findOne(ns(), BSON("a" << 1)).getField("t").date(); } void check() const { - BSONObj o = _client.findOne(ns(), QUERY("a" << 1)); + BSONObj o = _client.findOne(ns(), BSON("a" << 1)); ASSERT(Date_t{} != o.getField("t").date()); ASSERT_EQUALS(date_, o.getField("t").date()); } @@ -472,10 +465,10 @@ public: b.append("_id", 1); b.appendTimestamp("t"); _client.update(ns(), BSON("_id" << 1), b.done()); - date_ = _client.findOne(ns(), QUERY("_id" << 1)).getField("t").date(); + date_ = _client.findOne(ns(), BSON("_id" << 1)).getField("t").date(); } void check() const { - BSONObj o = _client.findOne(ns(), QUERY("_id" << 1)); + BSONObj o = _client.findOne(ns(), BSON("_id" << 1)); ASSERT(Date_t{} != o.getField("t").date()); ASSERT_EQUALS(date_, o.getField("t").date()); } @@ -772,7 +765,7 @@ public: string s() const { StringBuilder ss; unique_ptr<DBClientCursor> cc = - _client.query(NamespaceString(ns()), Query().sort(BSON("_id" << 1))); + _client.query(NamespaceString(ns()), BSONObj{}, Query().sort(BSON("_id" << 1))); bool first = true; while (cc->more()) { if (first) diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp index 63ee44d438f..474402e99f8 100644 --- a/src/mongo/dbtests/updatetests.cpp +++ b/src/mongo/dbtests/updatetests.cpp @@ -177,7 +177,8 @@ public: ASSERT(!_client .findOne(ns(), BSON("a" - << "c")) + << "c"), + Query()) .isEmpty()); } }; @@ -196,7 +197,8 @@ public: ASSERT(!_client .findOne(ns(), BSON("a" - << "cd")) + << "cd"), + Query()) .isEmpty()); } }; @@ -207,7 +209,7 @@ public: _client.insert(ns(), BSON("a" << "b")); - _client.update(ns(), Query(), BSON("$set" << BSON("a" << 5))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 5))); 
ASSERT(!_client.findOne(ns(), BSON("a" << 5)).isEmpty()); } }; @@ -218,7 +220,7 @@ public: _client.insert(ns(), BSON("a" << "bcd")); - _client.update(ns(), Query(), BSON("$set" << BSON("a" << 5.0))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 5.0))); ASSERT(!_client.findOne(ns(), BSON("a" << 5.0)).isEmpty()); } }; @@ -228,11 +230,11 @@ public: void run() { // Try with upsert false first. _client.insert(ns(), BSONObj() /* empty document */); - _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), false); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), false); ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty()); // Then with upsert true. - _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), true); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), true); ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty()); } }; @@ -241,11 +243,11 @@ class SetOnInsertFromNonExistent : public SetBase { public: void run() { // Try with upsert false first. - _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), false); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), false); ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty()); // Then with upsert true. - _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 1)), true); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), true); ASSERT(!_client.findOne(ns(), BSON("a" << 1)).isEmpty()); } }; @@ -253,14 +255,12 @@ public: class SetOnInsertFromNonExistentWithQuery : public SetBase { public: void run() { - Query q("{a:1}"); - // Try with upsert false first. - _client.update(ns(), q, BSON("$setOnInsert" << BSON("b" << 1)), false); + _client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("b" << 1)), false); ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty()); // Then with upsert true. 
- _client.update(ns(), q, BSON("$setOnInsert" << BSON("b" << 1)), true); + _client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("b" << 1)), true); ASSERT(!_client.findOne(ns(), BSON("a" << 1 << "b" << 1)).isEmpty()); } }; @@ -268,14 +268,12 @@ public: class SetOnInsertFromNonExistentWithQueryOverField : public SetBase { public: void run() { - Query q("{a:1}"); // same field that we'll setOnInsert on - // Try with upsert false first. - _client.update(ns(), q, BSON("$setOnInsert" << BSON("a" << 2)), false); + _client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("a" << 2)), false); ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty()); // Then with upsert true. - _client.update(ns(), q, BSON("$setOnInsert" << BSON("a" << 2)), true); + _client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("a" << 2)), true); ASSERT(!_client.findOne(ns(), BSON("a" << 2)).isEmpty()); } }; @@ -285,7 +283,7 @@ public: void run() { BSONObj res = fromjson("{'_id':0, a:1}"); _client.insert(ns(), res); - _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("b" << 1))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("b" << 1))); ASSERT(_client.findOne(ns(), BSON("a" << 1)).woCompare(res) == 0); } }; @@ -294,7 +292,7 @@ class SetOnInsertExisting : public SetBase { public: void run() { _client.insert(ns(), BSON("a" << 1)); - _client.update(ns(), Query(), BSON("$setOnInsert" << BSON("a" << 2))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 2))); ASSERT(!_client.findOne(ns(), BSON("a" << 1)).isEmpty()); } }; @@ -304,14 +302,14 @@ public: void run() { // Try with upsert false first. _client.update(ns(), - Query(), + BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 1) << "$setOnInsert" << BSON("b" << 2)), false); ASSERT(_client.findOne(ns(), BSON("a" << 1 << "b" << 2)).isEmpty()); // Then with upsert true. 
_client.update(ns(), - Query(), + BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 1) << "$setOnInsert" << BSON("b" << 2)), true); ASSERT(!_client.findOne(ns(), BSON("a" << 1 << "b" << 2)).isEmpty()); @@ -336,9 +334,9 @@ class ModDotted : public SetBase { public: void run() { _client.insert(ns(), fromjson("{a:{b:4}}")); - _client.update(ns(), Query(), BSON("$inc" << BSON("a.b" << 10))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$inc" << BSON("a.b" << 10))); ASSERT(!_client.findOne(ns(), BSON("a.b" << 14)).isEmpty()); - _client.update(ns(), Query(), BSON("$set" << BSON("a.b" << 55))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << 55))); ASSERT(!_client.findOne(ns(), BSON("a.b" << 55)).isEmpty()); } }; @@ -348,13 +346,14 @@ public: void run() { _client.insert(ns(), fromjson("{a:{b:'cdef'}}")); _client.update(ns(), - Query(), + BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << "llll"))); ASSERT(!_client .findOne(ns(), BSON("a.b" - << "llll")) + << "llll"), + Query()) .isEmpty()); } }; @@ -364,13 +363,14 @@ public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:'cdef'}}")); _client.update(ns(), - Query(), + BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << "lllll"))); ASSERT(_client .findOne(ns(), BSON("a.b" - << "lllll")) + << "lllll"), + Query()) .woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) == 0); } }; @@ -386,7 +386,8 @@ public: ASSERT(_client .findOne(ns(), BSON("a.b" - << "lllll")) + << "lllll"), + Query()) .woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) == 0); } }; @@ -396,12 +397,13 @@ public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{c:4}}")); _client.update(ns(), - Query(), + BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << "lllll"))); ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), BSON("a.b" - << "lllll"))), + << "lllll"), + Query())), mutablebson::unordered(fromjson("{'_id':0,a:{b:'lllll',c:4}}"))); } }; @@ -410,8 +412,9 @@ class IncMissing : public SetBase { public: void run() 
{ _client.insert(ns(), fromjson("{'_id':0}")); - _client.update(ns(), Query(), BSON("$inc" << BSON("f" << 3.0))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,f:3}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$inc" << BSON("f" << 3.0))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,f:3}")) == + 0); } }; @@ -419,8 +422,8 @@ class MultiInc : public SetBase { public: string s() { stringstream ss; - unique_ptr<DBClientCursor> cc = - _client.query(NamespaceString(ns()), Query().sort(BSON("_id" << 1))); + unique_ptr<DBClientCursor> cc = _client.query( + NamespaceString(ns()), BSONObj{} /*filter*/, Query().sort(BSON("_id" << 1))); bool first = true; while (cc->more()) { if (first) @@ -455,8 +458,9 @@ class UnorderedNewSet : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0}")); - _client.update(ns(), Query(), BSON("$set" << BSON("f.g.h" << 3.0 << "f.g.a" << 2.0))); - ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), Query())), + _client.update( + ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("f.g.h" << 3.0 << "f.g.a" << 2.0))); + ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), BSONObj{} /*filter*/)), mutablebson::unordered(fromjson("{'_id':0,f:{g:{a:2,h:3}}}"))); } }; @@ -466,7 +470,7 @@ public: void run() { _client.insert(ns(), fromjson("{'_id':0}")); _client.update(ns(), BSONObj(), BSON("$set" << BSON("f.g.h.b" << 3.0 << "f.g.a.b" << 2.0))); - ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), Query())), + ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), BSONObj{} /*filter*/)), mutablebson::unordered(fromjson("{'_id':0,f:{g:{a:{b:2},h:{b:3}}}}"))); } }; @@ -476,10 +480,11 @@ public: void run() { _client.insert(ns(), fromjson("{'_id':0,z:[4,'b']}")); _client.update(ns(), - Query(), + BSONObj{} /*filter*/, BSON("$set" << BSON("z.0" << "a"))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,z:['a','b']}")); 
+ ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,z:['a','b']}")); } }; @@ -487,8 +492,9 @@ class AttemptEmbedInExistingNum : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:1}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a.b" << 1))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:1}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << 1))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:1}")) == + 0); } }; @@ -496,8 +502,8 @@ class AttemptEmbedConflictsWithOtherSet : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a" << 2 << "a.b" << 1))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 2 << "a.b" << 1))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0}")); } }; @@ -505,8 +511,10 @@ class ModMasksEmbeddedConflict : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:2}}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a" << 2 << "a.b" << 1))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:2}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 2 << "a.b" << 1))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:2}}")) == + 0); } }; @@ -514,23 +522,26 @@ class ModOverwritesExistingObject : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:2}}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a" << BSON("c" << 2)))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{c:2}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << BSON("c" << 2)))); + 
ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{c:2}}")) == + 0); } }; class InvalidEmbeddedSet : public Fail { public: virtual BSONObj doIt() { - return _client.updateAcknowledged(ns(), Query(), BSON("$set" << BSON("a." << 1))); + return _client.updateAcknowledged( + ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a." << 1))); } }; class UpsertMissingEmbedded : public SetBase { public: void run() { - _client.update(ns(), Query(), BSON("$set" << BSON("a.b" << 1)), true); - ASSERT(!_client.findOne(ns(), QUERY("a.b" << 1)).isEmpty()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << 1)), true); + ASSERT(!_client.findOne(ns(), BSON("a.b" << 1)).isEmpty()); } }; @@ -538,8 +549,9 @@ class Push : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:[1]}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,5]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[1,5]}")); } }; @@ -547,8 +559,9 @@ class PushInvalidEltType : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:1}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:1}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:1}")) == + 0); } }; @@ -556,8 +569,11 @@ class PushConflictsWithOtherMod : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:[1]}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a" << 1) << "$push" << BSON("a" << 5))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1]}")) == 0); + _client.update(ns(), 
+ BSONObj{} /*filter*/, + BSON("$set" << BSON("a" << 1) << "$push" << BSON("a" << 5))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1]}")) == + 0); } }; @@ -565,8 +581,8 @@ class PushFromNothing : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[5]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[5]}")); } }; @@ -574,8 +590,9 @@ class PushFromEmpty : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:[]}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << 5))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[5]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[5]}")) == + 0); } }; @@ -583,8 +600,9 @@ class PushInsideNothing : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a.b" << 5))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:[5]}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a.b" << 5))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/) + .woCompare(fromjson("{'_id':0,a:{b:[5]}}")) == 0); } }; @@ -592,9 +610,10 @@ class CantPushInsideOtherMod : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0}")); - _client.update( - ns(), Query(), BSON("$set" << BSON("a" << BSONObj()) << "$push" << BSON("a.b" << 5))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0}")) == 0); + _client.update(ns(), + BSONObj{} /*filter*/, + BSON("$set" << BSON("a" << 
BSONObj()) << "$push" << BSON("a.b" << 5))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0}")) == 0); } }; @@ -602,8 +621,11 @@ class CantPushTwice : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:[]}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << 4) << "$push" << BSON("a" << 5))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[]}")) == 0); + _client.update(ns(), + BSONObj{} /*filter*/, + BSON("$push" << BSON("a" << 4) << "$push" << BSON("a" << 5))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[]}")) == + 0); } }; @@ -611,8 +633,10 @@ class SetEncapsulationConflictsWithExistingType : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:4}}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a.b.c" << 4.0))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:4}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b.c" << 4.0))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) == + 0); } }; @@ -620,8 +644,10 @@ class CantPushToParent : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:4}}")); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << 4.0))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:4}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 4.0))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) == + 0); } }; @@ -631,8 +657,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1]}")); // { $push : { a : { $each : [ 2, 3 ] } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3)); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), 
fromjson("{'_id':0,a:[1,2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[1,2,3]}")); } }; @@ -642,8 +669,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[]}")); // { $push : { a : { $each : [ 1, 2, 3 ] } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3)); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[1,2,3]}")); } }; @@ -653,8 +681,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1]}")); // { $push : { a : { $each : [ 2 ] , $slice : -3 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -3); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[1,2]}")); } }; @@ -664,8 +693,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1]}")); // { $push : { a : { $each : [ 2 ] , $slice : -2 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[1,2]}")); } }; @@ -675,8 +705,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1]}")); // { $push : { a : { $each : [ 2 , 3 ] , $slice : -2 } } } BSONObj 
pushObj = BSON("$each" << BSON_ARRAY(2 << 3) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,3]}")); } }; @@ -686,8 +717,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ] , $slice : -2 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,3]}")); } }; @@ -697,8 +729,8 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ] , $slice : 0 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[]}")); } }; @@ -708,8 +740,8 @@ public: _client.insert(ns(), fromjson("{'_id':0}")); // { $push : { a : { $each : [ 3 ] , $slice : 0 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), 
fromjson("{'_id':0,a:[]}")); } }; @@ -719,8 +751,9 @@ public: _client.insert(ns(), fromjson("{'_id':0}")); // { $push : { a : { $each : [ 1 , 2 ] , $slice : -3 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2) << "$slice" << -3); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1,2]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[1,2]}")); } }; @@ -730,8 +763,9 @@ public: _client.insert(ns(), fromjson("{'_id':0}")); // { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,3]}")); } }; @@ -741,8 +775,8 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[]}")); // { $push : { a : { $each : [ 1 ] , $slice : -3 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(1) << "$slice" << -3); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[1]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[1]}")); } }; @@ -752,8 +786,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[]}")); // { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - 
ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,3]}")); } }; @@ -764,8 +799,10 @@ public: // { $push: { a: { $each: [ 5 ] , $slice : -2 }, { b: $each: [ 6 ] , $slice: -1 } } } BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2); BSONObj objB = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << objB))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[6]}")); + _client.update( + ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "b" << objB))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,5],b:[6]}")); } }; @@ -775,8 +812,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2],b:[3]}")); // { $push : { a : { $each : [ 5 ] , $slice : -2 } , { b : 4 } } BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "b" << 4))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,5],b:[3,4]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "b" << 4))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,5],b:[3,4]}")); } }; @@ -787,8 +825,10 @@ public: // { $push: { a: { $each: [ 5 ] , $slice: -2 } , { a: $each: [ 6 ] , $slice: -1 } } } BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2); BSONObj other = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "a" << other))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0); + _client.update( + ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA 
<< "a" << other))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/) + .woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0); } }; @@ -798,8 +838,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1],b:[3]}")); // { $push : { a : { $each : [ 5 ] , $slice : -2 } , { a : 4 } } } BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << objA << "a" << 4))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "a" << 4))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/) + .woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0); } }; @@ -809,8 +850,10 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : 3 , $slice : -2 } } } BSONObj pushObj = BSON("$each" << 3 << "$slice" << -2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) == + 0); } }; @@ -820,8 +863,10 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ], $slice : [ -2 ] } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << BSON_ARRAY(-2)); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) == + 0); } }; @@ -831,8 +876,10 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ], $slice : 2 } } } BSONObj 
pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) == + 0); } }; @@ -843,8 +890,10 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ], $slice : -2.1 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.1); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT( + _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) == + 0); } }; @@ -854,8 +903,9 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ], $slice : -2.0 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.0); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT_BSONOBJ_EQ(_client.findOne(ns(), Query()), fromjson("{'_id':0,a:[2,3]}")); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), + fromjson("{'_id':0,a:[2,3]}")); } }; @@ -865,8 +915,10 @@ public: _client.insert(ns(), fromjson("{'_id':0,a:[1,2]}")); // { $push : { a : { $each : [ 3 ], $xxxx : 2 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$xxxx" << 2); - _client.update(ns(), Query(), BSON("$push" << BSON("a" << pushObj))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:[1,2]}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj))); + ASSERT( + 
_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) == + 0); } }; @@ -955,8 +1007,8 @@ protected: void check(BSONObj expected) { std::cout << expected.toString() << std::endl; - std::cout << _client.findOne(ns(), Query()) << std::endl; - ASSERT(_client.findOne(ns(), Query()).woCompare(expected) == 0); + std::cout << _client.findOne(ns(), BSONObj{} /*filter*/) << std::endl; + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(expected) == 0); } private: @@ -993,16 +1045,16 @@ public: switch (i) { case TOPK_ASC: case BOTTOMK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: case BOTTOMK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1038,16 +1090,16 @@ public: switch (i) { case TOPK_ASC: case BOTTOMK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: case BOTTOMK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1082,15 +1134,15 @@ public: BSONObj 
expected; switch (i) { case TOPK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1129,8 +1181,8 @@ public: BSONObj result; BSONObj expected; - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[]}"); ASSERT_BSONOBJ_EQ(result, expected); } @@ -1162,8 +1214,8 @@ public: BSONObj result; BSONObj expected; - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[]}"); ASSERT_BSONOBJ_EQ(result, expected); } @@ -1199,16 +1251,16 @@ public: switch (i) { case TOPK_ASC: case BOTTOMK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: case BOTTOMK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); 
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1244,15 +1296,15 @@ public: BSONObj expected; switch (i) { case TOPK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1294,16 +1346,16 @@ public: switch (i) { case TOPK_ASC: case BOTTOMK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: case BOTTOMK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1339,15 +1391,15 @@ public: BSONObj expected; switch (i) { case TOPK_ASC: - _client.update(ns(), Query(), getUpdate(i)); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; case TOPK_DESC: - _client.update(ns(), Query(), getUpdate(i)); - result 
= _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, getUpdate(i)); + result = _client.findOne(ns(), BSONObj{} /*filter*/); expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}"); ASSERT_BSONOBJ_EQ(result, expected); break; @@ -1507,37 +1559,37 @@ public: // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a..d:1} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << BSON("a..d" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a.:1} } } } pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << BSON("a." << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.b:1} } } } pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << BSON(".b" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.:1} } } } pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << BSON("." 
<< 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {'':1} } } } pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << BSON("" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1550,8 +1602,8 @@ public: // { $push : { x : { $each : [ 3 ], $slice:-2, $sort : {a:1} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2 << "$sort" << BSON("a" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1564,8 +1616,8 @@ public: // { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : 2} } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << 2); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1578,8 +1630,8 @@ public: // { $push : { x : { $each : [ {a:3} ], $slice:2, $sort : {a:1} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << 2 << "$sort" << BSON("a" << 1)); - 
_client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1592,8 +1644,8 @@ public: // { $push : { x : { $each : [ {a:3} ], $slice:-2.1, $sort : {a:1} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.1 << "$sort" << BSON("a" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1605,9 +1657,9 @@ public: // { $push : { x : { $each : [ {a:3} ], $slice:-2.0, $sort : {a:1} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.0 << "$sort" << BSON("a" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}"); - BSONObj result = _client.findOne(ns(), Query()); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1620,8 +1672,8 @@ public: // { $push : { x : { $each : [ {a:3} ], $slice:-2.0, $sort : [2, 1] } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.0 << "$sort" << BSON_ARRAY(2 << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1634,8 +1686,8 @@ public: // { 
$push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {a:10} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << BSON("a" << 10)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1647,9 +1699,9 @@ public: // { $push : { x : { $each : [ {a:2} ], $sort: {a:1}, $slice:-2 } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$slice" << -2.0); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}"); - BSONObj result = _client.findOne(ns(), Query()); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1662,8 +1714,8 @@ public: // { $push : { x : { $each : [ {a:2} ], $sort : {a:1}, $sort: {a:1} } } } BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1) << "$sort" << BSON("a" << 1)); - _client.update(ns(), Query(), BSON("$push" << BSON("x" << pushObj))); - BSONObj result = _client.findOne(ns(), Query()); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj))); + BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/); ASSERT_BSONOBJ_EQ(result, expected); } }; @@ -1672,8 +1724,10 @@ class CantIncParent : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:4}}")); - _client.update(ns(), Query(), BSON("$inc" << BSON("a" << 4.0))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:4}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$inc" << BSON("a" << 4.0))); + ASSERT( + 
_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) == + 0); } }; @@ -1681,8 +1735,9 @@ class DontDropEmpty : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:{}}}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a.c" << 4.0))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:{},c:4}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.c" << 4.0))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/) + .woCompare(fromjson("{'_id':0,a:{b:{},c:4}}")) == 0); } }; @@ -1690,8 +1745,9 @@ class InsertInEmpty : public SetBase { public: void run() { _client.insert(ns(), fromjson("{'_id':0,a:{b:{}}}")); - _client.update(ns(), Query(), BSON("$set" << BSON("a.b.f" << 4.0))); - ASSERT(_client.findOne(ns(), Query()).woCompare(fromjson("{'_id':0,a:{b:{f:4}}}")) == 0); + _client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b.f" << 4.0))); + ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/) + .woCompare(fromjson("{'_id':0,a:{b:{f:4}}}")) == 0); } }; @@ -1700,8 +1756,9 @@ public: void run() { ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("a" << 1))); _client.insert(ns(), fromjson("{'_id':0}")); - _client.update(ns(), Query(), fromjson("{$set:{'a.b':4}}")); - ASSERT_BSONOBJ_EQ(fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), Query())); + _client.update(ns(), BSONObj{} /*filter*/, fromjson("{$set:{'a.b':4}}")); + ASSERT_BSONOBJ_EQ(fromjson("{'_id':0,a:{b:4}}"), + _client.findOne(ns(), BSONObj{} /*filter*/)); ASSERT_BSONOBJ_EQ( fromjson("{'_id':0,a:{b:4}}"), _client.findOne(ns(), fromjson("{'a.b':4}"))); // make sure the index works @@ -1713,7 +1770,7 @@ public: void run() { _client.insert(ns(), BSON("_id" << 55 << "i" << 5)); _client.update(ns(), BSON("i" << 5), BSON("i" << 6)); - ASSERT(!_client.findOne(ns(), Query(BSON("_id" << 55)).hint("{\"_id\":1}")).isEmpty()); + ASSERT(!_client.findOne(ns(), BSON("_id" << 55), 
Query().hint(BSON("_id" << 1))).isEmpty()); } }; @@ -1722,7 +1779,7 @@ public: void run() { _client.update(ns(), BSONObj(), BSON("_id" << 52307 << "$set" << BSON("q" << 3)), true); ASSERT_BSONOBJ_EQ(fromjson("{'_id':52307,$set:{q:3}}"), - _client.findOne(ns(), Query(BSON("_id" << 52307)))); + _client.findOne(ns(), BSON("_id" << 52307))); } }; @@ -1731,7 +1788,7 @@ public: void run() { _client.insert(ns(), BSON("a" << 5)); _client.update(ns(), BSON("a" << 5), fromjson("{$set:{b:null}}")); - ASSERT_EQUALS(jstNULL, _client.findOne(ns(), QUERY("a" << 5)).getField("b").type()); + ASSERT_EQUALS(jstNULL, _client.findOne(ns(), BSON("a" << 5)).getField("b").type()); } }; @@ -1744,7 +1801,7 @@ public: _client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1))); ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x" << BSONObj() << "z" << 5), - _client.findOne(ns(), BSONObj())); + _client.findOne(ns(), BSONObj{} /*filter*/)); } }; @@ -1758,7 +1815,7 @@ public: ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1))); ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x" << BSONObj() << "x" << BSONObj()), - _client.findOne(ns(), BSONObj())); + _client.findOne(ns(), BSONObj{} /*filter*/)); } }; @@ -1768,7 +1825,7 @@ public: _client.insert(ns(), BSON("_id" << 0 << "x" << 5)); _client.update(ns(), BSONObj(), BSON("$set" << BSON("a" << 1 << "b" << 1 << "x" << 10))); ASSERT_EQUALS(mutablebson::unordered(BSON("_id" << 0 << "a" << 1 << "b" << 1 << "x" << 10)), - mutablebson::unordered(_client.findOne(ns(), BSONObj()))); + mutablebson::unordered(_client.findOne(ns(), BSONObj{} /*filter*/))); } }; @@ -1787,7 +1844,7 @@ protected: } BSONObj findOne() { - return _client.findOne(ns(), BSONObj()); + return _client.findOne(ns(), BSONObj{} /*filter*/); } void test(const char* initial, const char* mod, const char* after) { diff --git a/src/mongo/rpc/op_msg_integration_test.cpp 
b/src/mongo/rpc/op_msg_integration_test.cpp index d2371da22ad..9d119c391f6 100644 --- a/src/mongo/rpc/op_msg_integration_test.cpp +++ b/src/mongo/rpc/op_msg_integration_test.cpp @@ -1165,8 +1165,8 @@ TEST(OpMsg, ExhaustWithDBClientCursorBehavesCorrectly) { // Open an exhaust cursor. int batchSize = 2; - auto cursor = - conn->query(nss, Query().sort("_id", 1), 0, 0, nullptr, QueryOption_Exhaust, batchSize); + auto cursor = conn->query( + nss, BSONObj{}, Query().sort("_id", 1), 0, 0, nullptr, QueryOption_Exhaust, batchSize); // Verify that the documents are returned properly. Exhaust cursors should still receive results // in batches, so we check that these batches correspond to the given specified batch size. diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp index 09e40facbbf..397adff2a76 100644 --- a/src/mongo/scripting/engine.cpp +++ b/src/mongo/scripting/engine.cpp @@ -233,7 +233,7 @@ void Scope::loadStored(OperationContext* opCtx, bool ignoreNotConnected) { auto directDBClient = DBDirectClientFactory::get(opCtx).create(opCtx); unique_ptr<DBClientCursor> c = - directDBClient->query(coll, Query(), 0, 0, nullptr, QueryOption_SecondaryOk, 0); + directDBClient->query(coll, BSONObj{}, Query(), 0, 0, nullptr, QueryOption_SecondaryOk, 0); massert(16669, "unable to get db client cursor from query", c.get()); set<string> thisTime; diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp index 9a27f8a5e44..a850a7a9d1a 100644 --- a/src/mongo/scripting/mozjs/mongo.cpp +++ b/src/mongo/scripting/mozjs/mongo.cpp @@ -367,8 +367,10 @@ void MongoBase::Functions::find::call(JSContext* cx, JS::CallArgs args) { int batchSize = ValueWriter(cx, args.get(5)).toInt32(); int options = ValueWriter(cx, args.get(6)).toInt32(); + const Query query = Query::fromBSONDeprecated(q); std::unique_ptr<DBClientCursor> cursor(conn->query(NamespaceString(ns), - q, + query.getFilter(), + query, limit, nToSkip, haveFields ? 
&fields : nullptr, diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp index 96bf43f83a4..c547e51df59 100644 --- a/src/mongo/shell/encrypted_dbclient_base.cpp +++ b/src/mongo/shell/encrypted_dbclient_base.cpp @@ -508,15 +508,23 @@ JS::Value EncryptedDBClientBase::getCollection() const { std::unique_ptr<DBClientCursor> EncryptedDBClientBase::query( const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const BSONObj* fieldsToReturn, int queryOptions, int batchSize, boost::optional<BSONObj> readConcernObj) { - return _conn->query( - nsOrUuid, query, limit, nToSkip, fieldsToReturn, queryOptions, batchSize, readConcernObj); + return _conn->query(nsOrUuid, + filter, + querySettings, + limit, + nToSkip, + fieldsToReturn, + queryOptions, + batchSize, + readConcernObj); } bool EncryptedDBClientBase::isFailed() const { @@ -616,8 +624,12 @@ std::shared_ptr<SymmetricKey> EncryptedDBClientBase::getDataKey(const UUID& uuid std::shared_ptr<SymmetricKey> EncryptedDBClientBase::getDataKeyFromDisk(const UUID& uuid) { NamespaceString fullNameNS = getCollectionNS(); - BSONObj dataKeyObj = _conn->findOne( - fullNameNS.ns(), QUERY("_id" << uuid), nullptr, 0, repl::ReadConcernArgs::kImplicitDefault); + BSONObj dataKeyObj = _conn->findOne(fullNameNS.ns(), + BSON("_id" << uuid), + Query(), + nullptr, + 0, + repl::ReadConcernArgs::kImplicitDefault); if (dataKeyObj.isEmpty()) { uasserted(ErrorCodes::BadValue, "Invalid keyID."); } diff --git a/src/mongo/shell/encrypted_dbclient_base.h b/src/mongo/shell/encrypted_dbclient_base.h index 1f3750b8c45..eaa496834da 100644 --- a/src/mongo/shell/encrypted_dbclient_base.h +++ b/src/mongo/shell/encrypted_dbclient_base.h @@ -120,7 +120,8 @@ public: std::unique_ptr<DBClientCursor> query( const NamespaceStringOrUUID& nsOrUuid, - Query query, + const BSONObj& filter, + const Query& querySettings, int limit, int nToSkip, const 
BSONObj* fieldsToReturn, |