summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Storch <david.storch@mongodb.com>2021-10-21 16:29:10 -0400
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2021-11-10 17:13:38 +0000
commit1b51a502f9201aacf760ba37b02888bbbff831da (patch)
treece2ff376a49ae3738930f4ce42eb2fbb7036582c
parentb9dd38f987381211ca5217063c949638f028b592 (diff)
downloadmongo-1b51a502f9201aacf760ba37b02888bbbff831da.tar.gz
SERVER-59512 add new, cleaner query interface to DBClientBase
The new interface uses FindCommandRequest, and avoids any characteristics that relate specifically to the no-longer-supported OP_QUERY wire protocol message. This patch moves all callers of 'findOne()' onto the new API, but more work is required to fully eliminate the old 'query()' API from DBClientBase.
-rw-r--r--src/mongo/client/dbclient_base.cpp54
-rw-r--r--src/mongo/client/dbclient_base.h97
-rw-r--r--src/mongo/client/dbclient_cursor.cpp389
-rw-r--r--src/mongo/client/dbclient_cursor.h256
-rw-r--r--src/mongo/client/dbclient_mockcursor.cpp14
-rw-r--r--src/mongo/client/dbclient_rs.cpp114
-rw-r--r--src/mongo/client/dbclient_rs.h11
-rw-r--r--src/mongo/db/ops/write_ops_retryability.cpp7
-rw-r--r--src/mongo/db/query/query_request_helper.cpp179
-rw-r--r--src/mongo/db/query/query_request_helper.h13
-rw-r--r--src/mongo/db/query/query_request_test.cpp58
-rw-r--r--src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp2
-rw-r--r--src/mongo/db/repl/all_database_cloner.cpp3
-rw-r--r--src/mongo/db/repl/apply_ops.cpp8
-rw-r--r--src/mongo/db/repl/initial_sync_base_cloner.cpp2
-rw-r--r--src/mongo/db/repl/oplog.cpp9
-rw-r--r--src/mongo/db/repl/oplog_applier_impl_test.cpp8
-rw-r--r--src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp4
-rw-r--r--src/mongo/db/repl/oplog_fetcher.cpp5
-rw-r--r--src/mongo/db/repl/rollback_source_impl.cpp21
-rw-r--r--src/mongo/db/repl/tenant_collection_cloner.cpp7
-rw-r--r--src/mongo/db/repl/tenant_migration_recipient_service.cpp37
-rw-r--r--src/mongo/db/s/chunk_splitter.cpp22
-rw-r--r--src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp2
-rw-r--r--src/mongo/db/s/create_collection_coordinator.cpp2
-rw-r--r--src/mongo/db/s/persistent_task_queue.h11
-rw-r--r--src/mongo/db/s/resharding/resharding_agg_test.cpp2
-rw-r--r--src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp10
-rw-r--r--src/mongo/db/s/resharding/resharding_coordinator_test.cpp12
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_applier.cpp2
-rw-r--r--src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp30
-rw-r--r--src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp14
-rw-r--r--src/mongo/db/s/session_catalog_migration_destination_test.cpp4
-rw-r--r--src/mongo/db/s/session_catalog_migration_source.cpp13
-rw-r--r--src/mongo/db/s/shard_key_util.cpp2
-rw-r--r--src/mongo/db/s/sharding_ddl_util_test.cpp6
-rw-r--r--src/mongo/db/s/transaction_coordinator_test.cpp7
-rw-r--r--src/mongo/db/s/transaction_coordinator_util.cpp6
-rw-r--r--src/mongo/db/transaction_participant.cpp2
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.cpp97
-rw-r--r--src/mongo/dbtests/mock/mock_dbclient_connection.h8
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.cpp36
-rw-r--r--src/mongo/dbtests/mock/mock_remote_db_server.h16
-rw-r--r--src/mongo/dbtests/querytests.cpp105
-rw-r--r--src/mongo/dbtests/repltests.cpp22
-rw-r--r--src/mongo/dbtests/updatetests.cpp259
-rw-r--r--src/mongo/shell/encrypted_dbclient_base.cpp10
47 files changed, 1046 insertions, 952 deletions
diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp
index e646d5c7461..a2f1bea40c5 100644
--- a/src/mongo/client/dbclient_base.cpp
+++ b/src/mongo/client/dbclient_base.cpp
@@ -564,32 +564,6 @@ bool DBClientBase::exists(const string& ns) {
return !results.empty();
}
-BSONObj DBClientBase::findOne(const string& ns,
- const BSONObj& filter,
- const Query& querySettings,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- boost::optional<BSONObj> readConcernObj) {
- unique_ptr<DBClientCursor> c = this->query(NamespaceString(ns),
- filter,
- querySettings,
- 1 /*limit*/,
- 0 /*nToSkip*/,
- fieldsToReturn,
- queryOptions,
- 0 /* batchSize */,
- readConcernObj);
-
- // query() throws on network error so OK to uassert with numeric code here.
- uassert(10276,
- str::stream() << "DBClientBase::findN: transport error: " << getServerAddress()
- << " ns: " << ns << " filter: " << filter.toString()
- << " query settings: " << querySettings.getFullSettingsDeprecated(),
- c.get());
-
- return c->more() ? c->nextSafe() : BSONObj();
-}
-
std::pair<BSONObj, NamespaceString> DBClientBase::findOneByUUID(
const std::string& db,
UUID uuid,
@@ -656,6 +630,34 @@ unique_ptr<DBClientCursor> DBClientBase::query(const NamespaceStringOrUUID& nsOr
return nullptr;
}
+std::unique_ptr<DBClientCursor> DBClientBase::find(FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref) {
+ auto cursor = std::make_unique<DBClientCursor>(this, std::move(findRequest), readPref);
+ if (cursor->init()) {
+ return cursor;
+ }
+ return nullptr;
+}
+
+BSONObj DBClientBase::findOne(FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref) {
+ tassert(5951200,
+ "caller cannot provide a limit when calling DBClientBase::findOne()",
+ !findRequest.getLimit());
+ findRequest.setLimit(1);
+ auto cursor = this->find(std::move(findRequest), readPref);
+
+ uassert(5951201, "DBClientBase::findOne() could not produce cursor", cursor);
+
+ return cursor->more() ? cursor->nextSafe() : BSONObj{};
+}
+
+BSONObj DBClientBase::findOne(const NamespaceStringOrUUID& nssOrUuid, BSONObj filter) {
+ FindCommandRequest findRequest{nssOrUuid};
+ findRequest.setFilter(std::move(filter));
+ return findOne(std::move(findRequest));
+}
+
unique_ptr<DBClientCursor> DBClientBase::getMore(const string& ns, long long cursorId) {
unique_ptr<DBClientCursor> c(
new DBClientCursor(this, NamespaceString(ns), cursorId, 0 /* limit */, 0 /* options */));
diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h
index e0dd05ff0c2..f62fa148c26 100644
--- a/src/mongo/client/dbclient_base.h
+++ b/src/mongo/client/dbclient_base.h
@@ -510,21 +510,46 @@ public:
}
/**
- * Returns a single object that matches the query. if none do, then the object is empty.
- * Throws AssertionException.
+ * Issues a find command described by 'findRequest', and returns the resulting cursor.
+ */
+ virtual std::unique_ptr<DBClientCursor> find(FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref);
+
+ /**
+ * Identical to the 'find()' overload above, but uses a default value of "primary" for the read
+ * preference.
+ */
+ std::unique_ptr<DBClientCursor> find(FindCommandRequest findRequest) {
+ ReadPreferenceSetting defaultReadPref{};
+ return find(std::move(findRequest), defaultReadPref);
+ }
+
+ /**
+     * Issues a find command described by 'findRequest', but augments the request to have a limit of
+ * 1. It is illegal for the given 'findRequest' to have a limit already set.
*
- * The 'querySettings' argument might contain a subset of query settings, such as sort, hint,
- * etc. If the passed in 'querySettings' object also includes a filter (in its 'query'/'$query'
- * field), the filter will be ignored. Pass in the desired filter's BSON as 'filter' instead.
- * The other options parameters exist for historic reasons and will be eventually combined with
- * 'querySettings' into a single 'QueryOptions' parameter.
- */
- virtual BSONObj findOne(const std::string& ns,
- const BSONObj& filter,
- const Query& querySettings = Query(),
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- boost::optional<BSONObj> readConcernObj = boost::none);
+ * Returns the document resulting from the query, or an empty document if the query has no
+ * results.
+ */
+ virtual BSONObj findOne(FindCommandRequest findRequest, const ReadPreferenceSetting& readPref);
+
+ /**
+ * Identical to the 'findOne()' overload above, but uses a default value of "primary" for the
+ * read preference.
+ */
+ BSONObj findOne(FindCommandRequest findRequest) {
+ ReadPreferenceSetting defaultReadPref{};
+ return findOne(std::move(findRequest), defaultReadPref);
+ }
+
+ /**
+ * Issues a find command against the given collection (passed in either by namespace or by UUID)
+ * with the given 'filter'. Also augments the find request to have a limit of 1.
+ *
+ * Returns the document resulting from the query, or an empty document if the query has no
+ * results.
+ */
+ virtual BSONObj findOne(const NamespaceStringOrUUID& nssOrUuid, BSONObj filter);
/**
* Returns a pair with a single object that matches the filter within the collection specified
@@ -541,26 +566,7 @@ public:
boost::optional<BSONObj> readConcernObj = boost::none);
/**
- * Sends a query to the database.
- *
- * 'ns': Namespace to query, format is <dbname>.<collectname>[.<collectname>]*
- * 'filter': Query to perform on the collection.
- * 'querySettings': sort, hint, readPref, etc.
- * 'limit': The maximum number of documents that the cursor should return. 0 = unlimited.
- * 'nToSkip': Start with the nth item.
- * 'fieldsToReturn': Optional template of which fields to select. If unspecified, returns all
- * fields.
- * 'queryOptions': See options enum at top of this file.
- *
- * Notes:
- * The 'querySettings' argument might contain a subset of query settings, such as sort, hint,
- * etc. If the passed in 'querySettings' object also includes a filter (in its 'query'/'$query'
- * field), the filter will be ignored. Pass in the desired filter's BSON as 'filter' instead.
- * The other options parameters exist for historic reasons and will be eventually combined with
- * 'querySettings' into a single 'QueryOptions' parameter.
- *
- * Returns nullptr if error (connection failure).
- * Throws AssertionException.
+ * Legacy find API. Do not add new callers! Use the 'find*()' methods above instead.
*/
virtual std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
@@ -572,28 +578,6 @@ public:
int queryOptions = 0,
int batchSize = 0,
boost::optional<BSONObj> readConcernObj = boost::none);
-
- /**
- * Uses QueryOption_Exhaust, when available and specified in 'queryOptions'.
- *
- * Exhaust mode sends back all data queries as fast as possible, with no back-and-forth for
- * getMore. If you are certain you will exhaust the query, it could be useful. If exhaust mode
- * is not specified in 'queryOptions' or not available, this call transparently falls back to
- * using ordinary getMores.
- *
- * Use the DBClientCursorBatchIterator version, below, if you want to do items in large
- * blocks, perhaps to avoid granular locking and such.
- *
- * Notes:
- * The version that takes a BSONObj cannot return the namespace queried when the query is done
- * by UUID. If this is required, use the DBClientBatchIterator version.
- *
- * The 'querySettings' argument might contain a subset of query settings, such as sort, hint,
- * etc. If the passed in 'querySettings' object also includes a filter (in its 'query'/'$query'
- * field), the filter will be ignored. Pass in the desired filter's BSON as 'filter' instead.
- * The other options parameters exist for historic reasons and will be eventually combined with
- * 'querySettings' into a single 'QueryOptions' parameter.
- */
unsigned long long query(std::function<void(const BSONObj&)> f,
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& filter,
@@ -602,7 +586,6 @@ public:
int queryOptions = QueryOption_Exhaust,
int batchSize = 0,
boost::optional<BSONObj> readConcernObj = boost::none);
-
virtual unsigned long long query(std::function<void(DBClientCursorBatchIterator&)> f,
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& filter,
diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp
index 7cbe0eb6330..81de065de0b 100644
--- a/src/mongo/client/dbclient_cursor.cpp
+++ b/src/mongo/client/dbclient_cursor.cpp
@@ -52,9 +52,7 @@
#include "mongo/rpc/factory.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/rpc/metadata.h"
-#include "mongo/rpc/object_check.h"
#include "mongo/s/stale_exception.h"
-#include "mongo/util/bufreader.h"
#include "mongo/util/debug_util.h"
#include "mongo/util/destructor_guard.h"
#include "mongo/util/exit.h"
@@ -68,49 +66,251 @@ using std::unique_ptr;
using std::vector;
namespace {
+BSONObj addMetadata(DBClientBase* client, BSONObj command) {
+ if (client->getRequestMetadataWriter()) {
+ BSONObjBuilder builder(command);
+ auto opCtx = (haveClient() ? cc().getOperationContext() : nullptr);
+ uassertStatusOK(client->getRequestMetadataWriter()(opCtx, &builder));
+ return builder.obj();
+ } else {
+ return command;
+ }
+}
+
Message assembleCommandRequest(DBClientBase* cli,
StringData database,
int legacyQueryOptions,
BSONObj legacyQuery) {
auto request = rpc::upconvertRequest(database, std::move(legacyQuery), legacyQueryOptions);
+ request.body = addMetadata(cli, std::move(request.body));
+ return request.serialize();
+}
- if (cli->getRequestMetadataWriter()) {
- BSONObjBuilder bodyBob(std::move(request.body));
- auto opCtx = (haveClient() ? cc().getOperationContext() : nullptr);
- uassertStatusOK(cli->getRequestMetadataWriter()(opCtx, &bodyBob));
- request.body = bodyBob.obj();
+Message assembleFromFindCommandRequest(DBClientBase* client,
+ StringData database,
+ const FindCommandRequest& request,
+ const ReadPreferenceSetting& readPref) {
+ BSONObj findCmd = request.toBSON(BSONObj());
+
+ // Add the $readPreference field to the request.
+ {
+ BSONObjBuilder builder{findCmd};
+ readPref.toContainingBSON(&builder);
+ findCmd = builder.obj();
}
- return request.serialize();
+ findCmd = addMetadata(client, std::move(findCmd));
+ auto opMsgRequest = OpMsgRequest::fromDBAndBody(database, findCmd);
+ return opMsgRequest.serialize();
+}
+
+/**
+ * Initializes options based on the value of the 'options' bit vector.
+ *
+ * This contains flags such as tailable, exhaust, and noCursorTimeout.
+ */
+void initFromInt(int options, FindCommandRequest* findCommand) {
+ bool tailable = (options & QueryOption_CursorTailable) != 0;
+ bool awaitData = (options & QueryOption_AwaitData) != 0;
+ if (awaitData) {
+ findCommand->setAwaitData(true);
+ }
+ if (tailable) {
+ findCommand->setTailable(true);
+ }
+
+ if ((options & QueryOption_NoCursorTimeout) != 0) {
+ findCommand->setNoCursorTimeout(true);
+ }
+ if ((options & QueryOption_PartialResults) != 0) {
+ findCommand->setAllowPartialResults(true);
+ }
+}
+
+/**
+ * Fills out the 'findCommand' output parameter based on the contents of 'querySettings'. Here,
+ * 'querySettings' has the same format as the "query" field of the no-longer-supported OP_QUERY wire
+ * protocol message. It can look something like this for example:
+ *
+ * {$query: ..., $hint: ..., $min: ..., $max: ...}
+ *
+ * Although the OP_QUERY wire protocol message is no longer ever sent over the wire by the internal
+ * client, callers of the internal client may still specify the operation they want to perform using
+ * an OP_QUERY-inspired format until DBClientCursor's legacy API is removed.
+ */
+Status initFullQuery(const BSONObj& querySettings, FindCommandRequest* findCommand) {
+ for (auto&& e : querySettings) {
+ StringData name = e.fieldNameStringData();
+
+ if (name == "$orderby" || name == "orderby") {
+ if (Object == e.type()) {
+ findCommand->setSort(e.embeddedObject().getOwned());
+ } else if (Array == e.type()) {
+ findCommand->setSort(e.embeddedObject());
+
+ // TODO: Is this ever used? I don't think so.
+ // Quote:
+ // This is for languages whose "objects" are not well ordered (JSON is well
+ // ordered).
+ // [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
+ // note: this is slow, but that is ok as order will have very few pieces
+ BSONObjBuilder b;
+ char p[2] = "0";
+
+ while (1) {
+ BSONObj j = findCommand->getSort().getObjectField(p);
+ if (j.isEmpty()) {
+ break;
+ }
+ BSONElement e = j.firstElement();
+ if (e.eoo()) {
+ return Status(ErrorCodes::BadValue, "bad order array");
+ }
+ if (!e.isNumber()) {
+ return Status(ErrorCodes::BadValue, "bad order array [2]");
+ }
+ b.append(e);
+ (*p)++;
+ if (!(*p <= '9')) {
+ return Status(ErrorCodes::BadValue, "too many ordering elements");
+ }
+ }
+
+ findCommand->setSort(b.obj());
+ } else {
+ return Status(ErrorCodes::BadValue, "sort must be object or array");
+ }
+ } else if (name.startsWith("$")) {
+ name = name.substr(1); // chop first char
+ if (name == "min") {
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
+ }
+ findCommand->setMin(e.embeddedObject().getOwned());
+ } else if (name == "max") {
+ if (!e.isABSONObj()) {
+ return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
+ }
+ findCommand->setMax(e.embeddedObject().getOwned());
+ } else if (name == "hint") {
+ if (e.isABSONObj()) {
+ findCommand->setHint(e.embeddedObject().getOwned());
+ } else if (String == e.type()) {
+ findCommand->setHint(e.wrap());
+ } else {
+ return Status(ErrorCodes::BadValue,
+ "$hint must be either a string or nested object");
+ }
+ } else if (name == "returnKey") {
+ // Won't throw.
+ if (e.trueValue()) {
+ findCommand->setReturnKey(true);
+ }
+ } else if (name == "showDiskLoc") {
+ // Won't throw.
+ if (e.trueValue()) {
+ findCommand->setShowRecordId(true);
+ query_request_helper::addShowRecordIdMetaProj(findCommand);
+ }
+ } else if (name == "maxTimeMS") {
+ StatusWith<int> maxTimeMS = parseMaxTimeMS(e);
+ if (!maxTimeMS.isOK()) {
+ return maxTimeMS.getStatus();
+ }
+ findCommand->setMaxTimeMS(maxTimeMS.getValue());
+ }
+ }
+ }
+
+ return Status::OK();
+}
+
+
+Status initFindCommandRequest(int ntoskip,
+ int queryOptions,
+ const BSONObj& filter,
+ const Query& querySettings,
+ const BSONObj& proj,
+ FindCommandRequest* findCommand) {
+ if (!proj.isEmpty()) {
+ findCommand->setProjection(proj.getOwned());
+ }
+ if (ntoskip) {
+ findCommand->setSkip(ntoskip);
+ }
+
+ // Initialize flags passed as 'queryOptions' bit vector.
+ initFromInt(queryOptions, findCommand);
+
+ findCommand->setFilter(filter.getOwned());
+ Status status = initFullQuery(querySettings.getFullSettingsDeprecated(), findCommand);
+ if (!status.isOK()) {
+ return status;
+ }
+
+ // It's not possible to specify readConcern in a legacy query message, so initialize it to
+ // an empty readConcern object, ie. equivalent to `readConcern: {}`. This ensures that
+ // mongos passes this empty readConcern to shards.
+ findCommand->setReadConcern(BSONObj());
+
+ return query_request_helper::validateFindCommandRequest(*findCommand);
+}
+
+StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nssOrUuid,
+ const BSONObj& filter,
+ const Query& querySettings,
+ const BSONObj& proj,
+ int ntoskip,
+ int queryOptions) {
+ auto findCommand = std::make_unique<FindCommandRequest>(std::move(nssOrUuid));
+
+ Status status = initFindCommandRequest(
+ ntoskip, queryOptions, filter, querySettings, proj, findCommand.get());
+ if (!status.isOK()) {
+ return status;
+ }
+
+ return std::move(findCommand);
+}
+
+/**
+ * Computes the legacy OP_QUERY-style options bit vector implied by 'findCmd' and 'readPref'.
+ *
+ * These bits are still consumed by DBClientCursor internals (tailable/awaitData/exhaust
+ * handling) even when the operation is described via the modern FindCommandRequest API.
+ */
+int queryOptionsFromFindCommand(const FindCommandRequest& findCmd,
+                                const ReadPreferenceSetting& readPref) {
+    int queryOptions = 0;
+    // Accumulate flags with bitwise-OR. Using '&' against the zero-initialized value
+    // would always produce 0 and silently drop every option.
+    if (readPref.canRunOnSecondary()) {
+        queryOptions |= QueryOption_SecondaryOk;
+    }
+    if (findCmd.getTailable()) {
+        queryOptions |= QueryOption_CursorTailable;
+    }
+    if (findCmd.getNoCursorTimeout()) {
+        queryOptions |= QueryOption_NoCursorTimeout;
+    }
+    if (findCmd.getAwaitData()) {
+        queryOptions |= QueryOption_AwaitData;
+    }
+    if (findCmd.getAllowPartialResults()) {
+        queryOptions |= QueryOption_PartialResults;
+    }
+    return queryOptions;
+}
} // namespace
-Message DBClientCursor::_assembleInit() {
- if (cursorId) {
- return _assembleGetMore();
- }
-
- // If we haven't gotten a cursorId yet, we need to issue a new query or command.
- // The caller supplies a filter and a query settings object which may have $-prefixed directives
- // in the format previously expected for a legacy OP_QUERY. Therefore, we use the legacy parsing
- // code supplied by query_request_helper. When actually issuing the request to the remote node,
- // we will assemble a find command.
- auto findCommand =
- query_request_helper::fromLegacyQuery(_nsOrUuid,
- _filter,
- _querySettings,
- fieldsToReturn ? *fieldsToReturn : BSONObj(),
- nToSkip,
- opts);
+Message DBClientCursor::initFromLegacyRequest() {
+ auto findCommand = fromLegacyQuery(_nsOrUuid,
+ _filter,
+ _querySettings,
+ _fieldsToReturn ? *_fieldsToReturn : BSONObj(),
+ _nToSkip,
+ _opts);
// If there was a problem building the query request, report that.
uassertStatusOK(findCommand.getStatus());
- if (limit) {
- findCommand.getValue()->setLimit(limit);
+ if (_limit) {
+ findCommand.getValue()->setLimit(_limit);
}
- if (batchSize) {
- findCommand.getValue()->setBatchSize(batchSize);
+ if (_batchSize) {
+ findCommand.getValue()->setBatchSize(_batchSize);
}
const BSONObj querySettings = _querySettings.getFullSettingsDeprecated();
@@ -144,14 +344,31 @@ Message DBClientCursor::_assembleInit() {
cmd = BSONObjBuilder(std::move(cmd)).append(readPref).obj();
}
- return assembleCommandRequest(_client, ns.db(), opts, std::move(cmd));
+ return assembleCommandRequest(_client, _ns.db(), _opts, std::move(cmd));
}
-Message DBClientCursor::_assembleGetMore() {
- invariant(cursorId);
- auto getMoreRequest = GetMoreCommandRequest(cursorId, ns.coll().toString());
+Message DBClientCursor::assembleInit() {
+ if (_cursorId) {
+ return assembleGetMore();
+ }
+
+ // We haven't gotten a cursorId yet so we need to issue the initial find command.
+ if (_findRequest) {
+ // The caller described their find command using the modern 'FindCommandRequest' API.
+ return assembleFromFindCommandRequest(_client, _ns.db(), *_findRequest, _readPref);
+ } else {
+ // The caller used a legacy API to describe the find operation, which may include $-prefixed
+ // directives in the format previously expected for an OP_QUERY. We need to upconvert this
+ // OP_QUERY-inspired format to a find command.
+ return initFromLegacyRequest();
+ }
+}
+
+Message DBClientCursor::assembleGetMore() {
+ invariant(_cursorId);
+ auto getMoreRequest = GetMoreCommandRequest(_cursorId, _ns.coll().toString());
getMoreRequest.setBatchSize(
- boost::make_optional(batchSize != 0, static_cast<int64_t>(batchSize)));
+ boost::make_optional(_batchSize != 0, static_cast<int64_t>(_batchSize)));
getMoreRequest.setMaxTimeMS(boost::make_optional(
tailableAwaitData(),
static_cast<std::int64_t>(durationCount<Milliseconds>(_awaitDataTimeout))));
@@ -159,10 +376,10 @@ Message DBClientCursor::_assembleGetMore() {
getMoreRequest.setTerm(static_cast<std::int64_t>(*_term));
}
getMoreRequest.setLastKnownCommittedOpTime(_lastKnownCommittedOpTime);
- auto msg = assembleCommandRequest(_client, ns.db(), opts, getMoreRequest.toBSON({}));
+ auto msg = assembleCommandRequest(_client, _ns.db(), _opts, getMoreRequest.toBSON({}));
// Set the exhaust flag if needed.
- if (opts & QueryOption_Exhaust && msg.operation() == dbMsg) {
+ if (_opts & QueryOption_Exhaust && msg.operation() == dbMsg) {
OpMsg::setFlag(&msg, OpMsg::kExhaustSupported);
}
return msg;
@@ -170,7 +387,7 @@ Message DBClientCursor::_assembleGetMore() {
bool DBClientCursor::init() {
invariant(!_connectionHasPendingReplies);
- Message toSend = _assembleInit();
+ Message toSend = assembleInit();
verify(_client);
Message reply;
try {
@@ -193,16 +410,16 @@ bool DBClientCursor::init() {
void DBClientCursor::requestMore() {
// For exhaust queries, once the stream has been initiated we get data blasted to us
// from the remote server, without a need to send any more 'getMore' requests.
- const auto isExhaust = opts & QueryOption_Exhaust;
+ const auto isExhaust = _opts & QueryOption_Exhaust;
if (isExhaust && _connectionHasPendingReplies) {
return exhaustReceiveMore();
}
invariant(!_connectionHasPendingReplies);
- verify(cursorId && batch.pos == batch.objs.size());
+ verify(_cursorId && _batch.pos == _batch.objs.size());
auto doRequestMore = [&] {
- Message toSend = _assembleGetMore();
+ Message toSend = assembleGetMore();
Message response;
_client->call(toSend, response);
dataReceived(response);
@@ -223,9 +440,9 @@ void DBClientCursor::requestMore() {
* cursor id of 0.
*/
void DBClientCursor::exhaustReceiveMore() {
- verify(cursorId);
- verify(batch.pos == batch.objs.size());
- uassert(40675, "Cannot have limit for exhaust query", limit == 0);
+ verify(_cursorId);
+ verify(_batch.pos == _batch.objs.size());
+ uassert(40675, "Cannot have limit for exhaust query", _limit == 0);
Message response;
verify(_client);
uassertStatusOK(
@@ -251,28 +468,28 @@ BSONObj DBClientCursor::commandDataReceived(const Message& reply) {
uassertStatusOK(
commandStatus.withContext("stale config in DBClientCursor::dataReceived()"));
} else if (!commandStatus.isOK()) {
- wasError = true;
+ _wasError = true;
}
return commandReply->getCommandReply().getOwned();
}
void DBClientCursor::dataReceived(const Message& reply, bool& retry, string& host) {
- batch.objs.clear();
- batch.pos = 0;
+ _batch.objs.clear();
+ _batch.pos = 0;
const auto replyObj = commandDataReceived(reply);
- cursorId = 0; // Don't try to kill cursor if we get back an error.
+ _cursorId = 0; // Don't try to kill cursor if we get back an error.
auto cr = uassertStatusOK(CursorResponse::parseFromBSON(replyObj));
- cursorId = cr.getCursorId();
+ _cursorId = cr.getCursorId();
uassert(50935,
"Received a getMore response with a cursor id of 0 and the moreToCome flag set.",
- !(_connectionHasPendingReplies && cursorId == 0));
+ !(_connectionHasPendingReplies && _cursorId == 0));
- ns = cr.getNSS(); // find command can change the ns to use for getMores.
+ _ns = cr.getNSS(); // find command can change the ns to use for getMores.
// Store the resume token, if we got one.
_postBatchResumeToken = cr.getPostBatchResumeToken();
- batch.objs = cr.releaseBatch();
+ _batch.objs = cr.releaseBatch();
if (replyObj.hasField(LogicalTime::kOperationTimeFieldName)) {
_operationTime = LogicalTime::fromOperationTime(replyObj).asTimestamp();
@@ -284,14 +501,14 @@ bool DBClientCursor::more() {
if (!_putBack.empty())
return true;
- if (batch.pos < batch.objs.size())
+ if (_batch.pos < _batch.objs.size())
return true;
- if (cursorId == 0)
+ if (_cursorId == 0)
return false;
requestMore();
- return batch.pos < batch.objs.size();
+ return _batch.pos < _batch.objs.size();
}
BSONObj DBClientCursor::next() {
@@ -302,10 +519,9 @@ BSONObj DBClientCursor::next() {
}
uassert(
- 13422, "DBClientCursor next() called but more() is false", batch.pos < batch.objs.size());
+ 13422, "DBClientCursor next() called but more() is false", _batch.pos < _batch.objs.size());
- /* todo would be good to make data null at end of batch for safety */
- return std::move(batch.objs[batch.pos++]);
+ return std::move(_batch.objs[_batch.pos++]);
}
BSONObj DBClientCursor::nextSafe() {
@@ -313,7 +529,7 @@ BSONObj DBClientCursor::nextSafe() {
// Only convert legacy errors ($err) to exceptions. Otherwise, just return the response and the
// caller will interpret it as a command error.
- if (wasError && strcmp(o.firstElementFieldName(), "$err") == 0) {
+ if (_wasError && strcmp(o.firstElementFieldName(), "$err") == 0) {
uassertStatusOK(getStatusFromCommandResult(o));
}
@@ -321,10 +537,10 @@ BSONObj DBClientCursor::nextSafe() {
}
void DBClientCursor::peek(vector<BSONObj>& v, int atMost) {
- auto end = atMost >= static_cast<int>(batch.objs.size() - batch.pos)
- ? batch.objs.end()
- : batch.objs.begin() + batch.pos + atMost;
- v.insert(v.end(), batch.objs.begin() + batch.pos, end);
+ auto end = atMost >= static_cast<int>(_batch.objs.size() - _batch.pos)
+ ? _batch.objs.end()
+ : _batch.objs.begin() + _batch.pos + atMost;
+ v.insert(v.end(), _batch.objs.begin() + _batch.pos, end);
}
BSONObj DBClientCursor::peekFirst() {
@@ -338,7 +554,7 @@ BSONObj DBClientCursor::peekFirst() {
}
bool DBClientCursor::peekError(BSONObj* error) {
- if (!wasError)
+ if (!_wasError)
return false;
vector<BSONObj> v;
@@ -434,28 +650,45 @@ DBClientCursor::DBClientCursor(DBClientBase* client,
boost::optional<BSONObj> readConcernObj,
boost::optional<Timestamp> operationTime,
boost::optional<BSONObj> postBatchResumeToken)
- : batch{std::move(initialBatch)},
+ : _batch{std::move(initialBatch)},
_client(client),
_originalHost(_client->getServerAddress()),
_nsOrUuid(nsOrUuid),
- ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbname())),
+ _ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbname())),
+ _cursorId(cursorId),
+ _batchSize(batchSize == 1 ? 2 : batchSize),
+ _limit(limit),
_filter(filter),
_querySettings(querySettings),
- limit(limit),
- nToSkip(nToSkip),
- fieldsToReturn(fieldsToReturn),
- opts(queryOptions),
- batchSize(batchSize == 1 ? 2 : batchSize),
- cursorId(cursorId),
- _ownCursor(true),
- wasError(false),
+ _nToSkip(nToSkip),
+ _fieldsToReturn(fieldsToReturn),
_readConcernObj(readConcernObj),
+ _opts(queryOptions),
_operationTime(operationTime),
_postBatchResumeToken(postBatchResumeToken) {
- tassert(5746103, "DBClientCursor limit must be non-negative", limit >= 0);
+ tassert(5746103, "DBClientCursor limit must be non-negative", _limit >= 0);
+}
+
+DBClientCursor::DBClientCursor(DBClientBase* client,
+ FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref)
+ : _client(client),
+ _originalHost(_client->getServerAddress()),
+ _nsOrUuid(findRequest.getNamespaceOrUUID()),
+ _ns(_nsOrUuid.nss() ? *_nsOrUuid.nss() : NamespaceString(_nsOrUuid.dbname())),
+ _batchSize(findRequest.getBatchSize().value_or(0)),
+ _limit(findRequest.getLimit().value_or(0)),
+ _findRequest(std::move(findRequest)),
+ _readPref(readPref),
+ _opts(queryOptionsFromFindCommand(*_findRequest, _readPref)) {
+ // Internal clients should always pass an explicit readConcern. If the caller did not already
+    // pass a readConcern, then we must explicitly initialize an empty readConcern so that it ends up
+ // in the serialized version of the find command which will be sent across the wire.
+ if (!_findRequest->getReadConcern()) {
+ _findRequest->setReadConcern(BSONObj{});
+ }
}
-/* static */
StatusWith<std::unique_ptr<DBClientCursor>> DBClientCursor::fromAggregationRequest(
DBClientBase* client, AggregateCommandRequest aggRequest, bool secondaryOk, bool useExhaust) {
BSONObj ret;
@@ -504,8 +737,8 @@ DBClientCursor::~DBClientCursor() {
void DBClientCursor::kill() {
DESTRUCTOR_GUARD({
- if (cursorId && _ownCursor && !globalInShutdownDeprecated()) {
- auto killCursor = [&](auto&& conn) { conn->killCursor(ns, cursorId); };
+ if (_cursorId && !globalInShutdownDeprecated()) {
+ auto killCursor = [&](auto&& conn) { conn->killCursor(_ns, _cursorId); };
// We only need to kill the cursor if there aren't pending replies. Pending replies
// indicates that this is an exhaust cursor, so the connection must be closed and the
@@ -517,7 +750,7 @@ void DBClientCursor::kill() {
});
// Mark this cursor as dead since we can't do any getMores.
- cursorId = 0;
+ _cursorId = 0;
}
diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h
index 94c541e81f3..ad8eba20d77 100644
--- a/src/mongo/client/dbclient_cursor.h
+++ b/src/mongo/client/dbclient_cursor.h
@@ -36,6 +36,7 @@
#include "mongo/db/jsobj.h"
#include "mongo/db/json.h"
#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/find_command_gen.h"
#include "mongo/rpc/message.h"
namespace mongo {
@@ -44,57 +45,102 @@ class AScopedConnection;
class DBClientBase;
class AggregateCommandRequest;
-/** Queries return a cursor object */
+/**
+ * The internal client's cursor representation for find or agg cursors. The cursor is iterated by
+ * the caller using the 'more()' and 'next()' methods. Any necessary getMore requests are
+ * constructed and issued internally.
+ */
class DBClientCursor {
DBClientCursor(const DBClientCursor&) = delete;
DBClientCursor& operator=(const DBClientCursor&) = delete;
public:
- /** If true, safe to call next(). Requests more from server if necessary. */
+ static StatusWith<std::unique_ptr<DBClientCursor>> fromAggregationRequest(
+ DBClientBase* client,
+ AggregateCommandRequest aggRequest,
+ bool secondaryOk,
+ bool useExhaust);
+
+ DBClientCursor(DBClientBase* client,
+ const NamespaceStringOrUUID& nsOrUuid,
+ const BSONObj& filter,
+ const Query& querySettings,
+ int limit,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int bs,
+ boost::optional<BSONObj> readConcernObj = boost::none);
+
+ DBClientCursor(DBClientBase* client,
+ const NamespaceStringOrUUID& nsOrUuid,
+ long long cursorId,
+ int limit,
+ int options,
+ std::vector<BSONObj> initialBatch = {},
+ boost::optional<Timestamp> operationTime = boost::none,
+ boost::optional<BSONObj> postBatchResumeToken = boost::none);
+
+ DBClientCursor(DBClientBase* client,
+ FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref);
+
+ virtual ~DBClientCursor();
+
+ /**
+ * If true, safe to call next(). Requests more from server if necessary.
+ */
virtual bool more();
bool hasMoreToCome() const {
return _connectionHasPendingReplies;
}
- /** If true, there is more in our local buffers to be fetched via next(). Returns
- false when a getMore request back to server would be required. You can use this
- if you want to exhaust whatever data has been fetched to the client already but
- then perhaps stop.
- */
+ /**
+ * If true, there is more in our local buffers to be fetched via next(). Returns false when a
+ * getMore request back to server would be required. You can use this if you want to exhaust
+ * whatever data has been fetched to the client already but then perhaps stop.
+ */
int objsLeftInBatch() const {
- return _putBack.size() + batch.objs.size() - batch.pos;
+ return _putBack.size() + _batch.objs.size() - _batch.pos;
}
bool moreInCurrentBatch() {
return objsLeftInBatch() > 0;
}
- /** next
- @return next object in the result cursor.
- on an error at the remote server, you will get back:
- { $err: <std::string> }
- if you do not want to handle that yourself, call nextSafe().
- */
+ /**
+ * Returns the next object from the cursor.
+ *
+ * On error at the remote server, you will get back:
+ * {$err: <std::string>}
+ *
+ * If you do not want to handle that yourself, call 'nextSafe()'.
+ */
virtual BSONObj next();
/**
- restore an object previously returned by next() to the cursor
+ * Restores an object previously returned by next() to the cursor.
*/
void putBack(const BSONObj& o) {
_putBack.push(o.getOwned());
}
- /** throws AssertionException if get back { $err : ... } */
+ /**
+ * Similar to 'next()', but throws an AssertionException on error.
+ */
BSONObj nextSafe();
- /** peek ahead at items buffered for future next() calls.
- never requests new data from the server. so peek only effective
- with what is already buffered.
- WARNING: no support for _putBack yet!
- */
+ /**
+ * Peek ahead at items buffered for future next() calls. Never requests new data from the
+ * server.
+ *
+ * WARNING: no support for _putBack yet!
+ */
void peek(std::vector<BSONObj>&, int atMost);
- // Peeks at first element, if exists
+ /**
+ * Peeks at first element. If no first element exists, returns an empty object.
+ */
BSONObj peekFirst();
/**
@@ -103,7 +149,7 @@ public:
bool peekError(BSONObj* error = nullptr);
/**
- iterate the rest of the cursor and return the number if items
+ * Iterates the rest of the cursor and returns the resulting number of items.
*/
int itcount() {
int c = 0;
@@ -114,65 +160,31 @@ public:
return c;
}
- /** cursor no longer valid -- use with tailable cursors.
- note you should only rely on this once more() returns false;
- 'dead' may be preset yet some data still queued and locally
- available from the dbclientcursor.
- */
+ /**
+ * Returns true if the cursor is no longer open on the remote node (the remote node has returned
+ * a cursor id of zero).
+ */
bool isDead() const {
- return cursorId == 0;
+ return _cursorId == 0;
}
bool tailable() const {
- return (opts & QueryOption_CursorTailable) != 0;
+ return (_opts & QueryOption_CursorTailable) != 0;
}
bool tailableAwaitData() const {
- return tailable() && (opts & QueryOption_AwaitData);
+ return tailable() && (_opts & QueryOption_AwaitData);
}
- /// Change batchSize after construction. Can change after requesting first batch.
+ /**
+ * Changes the cursor's batchSize after construction. Can change after requesting first batch.
+ */
void setBatchSize(int newBatchSize) {
- batchSize = newBatchSize;
+ _batchSize = newBatchSize;
}
- DBClientCursor(DBClientBase* client,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int bs,
- boost::optional<BSONObj> readConcernObj = boost::none);
-
- DBClientCursor(DBClientBase* client,
- const NamespaceStringOrUUID& nsOrUuid,
- long long cursorId,
- int limit,
- int options,
- std::vector<BSONObj> initialBatch = {},
- boost::optional<Timestamp> operationTime = boost::none,
- boost::optional<BSONObj> postBatchResumeToken = boost::none);
-
- static StatusWith<std::unique_ptr<DBClientCursor>> fromAggregationRequest(
- DBClientBase* client,
- AggregateCommandRequest aggRequest,
- bool secondaryOk,
- bool useExhaust);
-
- virtual ~DBClientCursor();
-
long long getCursorId() const {
- return cursorId;
- }
-
- /** by default we "own" the cursor and will send the server a KillCursor
- message when ~DBClientCursor() is called. This function overrides that.
- */
- void decouple() {
- _ownCursor = false;
+ return _cursorId;
}
void attach(AScopedConnection* conn);
@@ -182,21 +194,17 @@ public:
}
std::string getns() const {
- return ns.ns();
+ return _ns.ns();
}
const NamespaceString& getNamespaceString() const {
- return ns;
+ return _ns;
}
- /**
- * actually does the query
- */
- bool init();
/**
- * For exhaust. Used in DBClientConnection.
+ * Performs the initial query, opening the cursor.
*/
- void exhaustReceiveMore();
+ bool init();
/**
* Marks this object as dead and sends the KillCursors message to the server.
@@ -265,7 +273,7 @@ protected:
size_t pos = 0;
};
- Batch batch;
+ Batch _batch;
private:
DBClientCursor(DBClientBase* client,
@@ -283,34 +291,6 @@ private:
boost::optional<Timestamp> operationTime,
boost::optional<BSONObj> postBatchResumeToken = boost::none);
- DBClientBase* _client;
- std::string _originalHost;
- NamespaceStringOrUUID _nsOrUuid;
- // 'ns' is initially the NamespaceString passed in, or the dbName if doing a find by UUID.
- // After a successful 'find' command, 'ns' is updated to contain the namespace returned by that
- // command.
- NamespaceString ns;
- BSONObj _filter;
- Query _querySettings;
- int limit;
- int nToSkip;
- const BSONObj* fieldsToReturn;
- int opts;
- int batchSize;
- std::stack<BSONObj> _putBack;
- long long cursorId;
- bool _ownCursor; // see decouple()
- std::string _scopedHost;
- bool wasError;
- bool _connectionHasPendingReplies = false;
- int _lastRequestId = 0;
- Milliseconds _awaitDataTimeout = Milliseconds{0};
- boost::optional<long long> _term;
- boost::optional<repl::OpTime> _lastKnownCommittedOpTime;
- boost::optional<BSONObj> _readConcernObj;
- boost::optional<Timestamp> _operationTime;
- boost::optional<BSONObj> _postBatchResumeToken;
-
void dataReceived(const Message& reply) {
bool retry;
std::string lazyHost;
@@ -326,9 +306,67 @@ private:
void requestMore();
- // init pieces
- Message _assembleInit();
- Message _assembleGetMore();
+ void exhaustReceiveMore();
+
+ Message assembleInit();
+ Message assembleGetMore();
+
+ /**
+ * Constructs the initial find command request based on a legacy OP_QUERY-style description of
+ * the find operation. Only used if the caller constructed the 'DBClientCursor' with the legacy
+ * API.
+ */
+ Message initFromLegacyRequest();
+
+ DBClientBase* _client;
+ std::string _originalHost;
+ NamespaceStringOrUUID _nsOrUuid;
+
+ // 'ns' is initially the NamespaceString passed in, or the dbName if doing a find by UUID.
+ // After a successful 'find' command, 'ns' is updated to contain the namespace returned by that
+ // command.
+ NamespaceString _ns;
+
+ long long _cursorId = 0;
+
+ std::stack<BSONObj> _putBack;
+ std::string _scopedHost;
+ bool _wasError = false;
+ bool _connectionHasPendingReplies = false;
+ int _lastRequestId = 0;
+
+ int _batchSize;
+ int _limit = 0;
+
+ // If the caller describes the find command being executed by this cursor as a
+ // 'FindCommandRequest', then that request object and the associated read preference are set
+ // here. Otherwise, if the caller uses the legacy OP_QUERY-inspired API, these members are
+ // default-initialized but never used.
+ boost::optional<FindCommandRequest> _findRequest;
+ ReadPreferenceSetting _readPref;
+
+ // These data members are only used if the cursor was constructed using the legacy
+ // OP_QUERY-inspired API. If the cursor was constructed using the 'FindCommandRequest'-based
+ // API, these are initialized to their default values but never used.
+ BSONObj _filter;
+ Query _querySettings;
+ int _nToSkip = 0;
+ const BSONObj* _fieldsToReturn = nullptr;
+ boost::optional<BSONObj> _readConcernObj;
+
+ // This has the same meaning as the flags bit vector from the no-longer-supported OP_QUERY wire
+ // protocol message. However, it is initialized even if the caller constructed the cursor using
+ // the 'FindCommandRequest'-based API.
+ //
+ // We should eventually stop using the OP_QUERY flags bit vector in server code, since OP_QUERY
+ // is no longer supported.
+ int _opts;
+
+ Milliseconds _awaitDataTimeout = Milliseconds{0};
+ boost::optional<long long> _term;
+ boost::optional<repl::OpTime> _lastKnownCommittedOpTime;
+ boost::optional<Timestamp> _operationTime;
+ boost::optional<BSONObj> _postBatchResumeToken;
};
/** iterate over objects in current batch only - will not cause a network call
diff --git a/src/mongo/client/dbclient_mockcursor.cpp b/src/mongo/client/dbclient_mockcursor.cpp
index 1647111c0b4..7082f55517e 100644
--- a/src/mongo/client/dbclient_mockcursor.cpp
+++ b/src/mongo/client/dbclient_mockcursor.cpp
@@ -53,10 +53,10 @@ DBClientMockCursor::DBClientMockCursor(mongo::DBClientBase* client,
}
bool DBClientMockCursor::more() {
- if (_batchSize && batch.pos == _batchSize) {
+ if (_batchSize && _batch.pos == _batchSize) {
_fillNextBatch();
}
- return batch.pos < batch.objs.size();
+ return _batch.pos < _batch.objs.size();
}
void DBClientMockCursor::_fillNextBatch() {
@@ -71,15 +71,15 @@ void DBClientMockCursor::_fillNextBatch() {
});
int leftInBatch = _batchSize;
- batch.objs.clear();
+ _batch.objs.clear();
while (_iter.more() && (!_batchSize || leftInBatch--)) {
- batch.objs.emplace_back(_iter.next().Obj().getOwned());
+ _batch.objs.emplace_back(_iter.next().Obj().getOwned());
}
- batch.pos = 0;
+ _batch.pos = 0;
// Store a mock resume token, if applicable.
- if (!batch.objs.empty()) {
- auto lastId = batch.objs.back()["_id"].numberInt();
+ if (!_batch.objs.empty()) {
+ auto lastId = _batch.objs.back()["_id"].numberInt();
_postBatchResumeToken = BSON("n" << lastId);
}
}
diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp
index e434b4634f9..5ee9708a9d4 100644
--- a/src/mongo/client/dbclient_rs.cpp
+++ b/src/mongo/client/dbclient_rs.cpp
@@ -535,6 +535,59 @@ void DBClientReplicaSet::remove(const string& ns,
checkPrimary()->remove(ns, filter, removeMany, writeConcernObj);
}
+std::unique_ptr<DBClientCursor> DBClientReplicaSet::find(FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref) {
+ invariant(findRequest.getNamespaceOrUUID().nss());
+ const std::string nss = findRequest.getNamespaceOrUUID().nss()->ns();
+ if (_isSecondaryQuery(nss, findRequest.toBSON(BSONObj{}), readPref)) {
+ LOGV2_DEBUG(5951202,
+ 3,
+ "dbclient_rs query using secondary or tagged node selection",
+ "replicaSet"_attr = _getMonitor()->getName(),
+ "readPref"_attr = readPref.toString(),
+ "primary"_attr =
+ (_primary.get() != nullptr ? _primary->getServerAddress() : "[not cached]"),
+ "lastTagged"_attr = (_lastSecondaryOkConn.get() != nullptr
+ ? _lastSecondaryOkConn->getServerAddress()
+ : "[not cached]"));
+ std::string lastNodeErrMsg;
+
+ for (size_t retry = 0; retry < MAX_RETRY; retry++) {
+ try {
+ DBClientConnection* conn =
+ selectNodeUsingTags(std::make_shared<ReadPreferenceSetting>(readPref));
+ if (!conn) {
+ break;
+ }
+
+ std::unique_ptr<DBClientCursor> cursor = conn->find(findRequest, readPref);
+
+ return checkSecondaryQueryResult(std::move(cursor));
+ } catch (const DBException& ex) {
+ const Status status = ex.toStatus(str::stream() << "can't query replica set node "
+ << _lastSecondaryOkHost);
+ lastNodeErrMsg = status.reason();
+ _invalidateLastSecondaryOkCache(status);
+ }
+ }
+
+ StringBuilder assertMsg;
+ assertMsg << "Failed to do query, no good nodes in " << _getMonitor()->getName();
+ if (!lastNodeErrMsg.empty()) {
+ assertMsg << ", last error: " << lastNodeErrMsg;
+ }
+
+ uasserted(5951203, assertMsg.str());
+ }
+
+ LOGV2_DEBUG(5951204,
+ 3,
+ "dbclient_rs query to primary node",
+ "replicaSet"_attr = _getMonitor()->getName());
+
+ return checkPrimary()->find(std::move(findRequest), readPref);
+}
+
unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& filter,
const Query& querySettings,
@@ -617,67 +670,6 @@ unique_ptr<DBClientCursor> DBClientReplicaSet::query(const NamespaceStringOrUUID
readConcernObj);
}
-BSONObj DBClientReplicaSet::findOne(const string& ns,
- const BSONObj& filter,
- const Query& querySettings,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- boost::optional<BSONObj> readConcernObj) {
- shared_ptr<ReadPreferenceSetting> readPref(_extractReadPref(querySettings, queryOptions));
- if (_isSecondaryQuery(ns, filter, *readPref)) {
- LOGV2_DEBUG(20135,
- 3,
- "dbclient_rs findOne using secondary or tagged node selection in {replicaSet}, "
- "read pref is {readPref} "
- "(primary : {primary}, lastTagged : {lastTagged})",
- "dbclient_rs findOne using secondary or tagged node selection",
- "replicaSet"_attr = _getMonitor()->getName(),
- "readPref"_attr = readPref->toString(),
- "primary"_attr =
- (_primary.get() != nullptr ? _primary->getServerAddress() : "[not cached]"),
- "secondaryHostNamme"_attr = (_lastSecondaryOkConn.get() != nullptr
- ? _lastSecondaryOkConn->getServerAddress()
- : "[not cached]"));
-
- string lastNodeErrMsg;
-
- for (size_t retry = 0; retry < MAX_RETRY; retry++) {
- try {
- DBClientConnection* conn = selectNodeUsingTags(readPref);
-
- if (conn == nullptr) {
- break;
- }
-
- return conn->findOne(
- ns, filter, querySettings, fieldsToReturn, queryOptions, readConcernObj);
- } catch (const DBException& ex) {
- const Status status = ex.toStatus(str::stream() << "can't findone replica set node "
- << _lastSecondaryOkHost.toString());
- lastNodeErrMsg = status.reason();
- _invalidateLastSecondaryOkCache(status);
- }
- }
-
- StringBuilder assertMsg;
- assertMsg << "Failed to call findOne, no good nodes in " << _getMonitor()->getName();
- if (!lastNodeErrMsg.empty()) {
- assertMsg << ", last error: " << lastNodeErrMsg;
- }
-
- uasserted(16379, assertMsg.str());
- }
-
- LOGV2_DEBUG(20136,
- 3,
- "dbclient_rs findOne to primary node in {replicaSet}",
- "dbclient_rs findOne to primary node",
- "replicaSet"_attr = _getMonitor()->getName());
-
- return checkPrimary()->findOne(
- ns, filter, querySettings, fieldsToReturn, queryOptions, readConcernObj);
-}
-
void DBClientReplicaSet::killCursor(const NamespaceString& ns, long long cursorID) {
// we should never call killCursor on a replica set connection
// since we don't know which server it belongs to
diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h
index 7dd53d30448..e0b153c03c2 100644
--- a/src/mongo/client/dbclient_rs.h
+++ b/src/mongo/client/dbclient_rs.h
@@ -88,6 +88,9 @@ public:
// ----------- simple functions --------------
+ std::unique_ptr<DBClientCursor> find(FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref) override;
+
/** throws userassertion "no primary found" */
std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
@@ -100,14 +103,6 @@ public:
int batchSize = 0,
boost::optional<BSONObj> readConcernObj = boost::none) override;
- /** throws userassertion "no primary found" */
- BSONObj findOne(const std::string& ns,
- const BSONObj& filter,
- const Query& querySettings,
- const BSONObj* fieldsToReturn = nullptr,
- int queryOptions = 0,
- boost::optional<BSONObj> readConcernObj = boost::none) override;
-
void insert(const std::string& ns,
BSONObj obj,
bool ordered = true,
diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp
index 65fffa768d9..4cbc3a20366 100644
--- a/src/mongo/db/ops/write_ops_retryability.cpp
+++ b/src/mongo/db/ops/write_ops_retryability.cpp
@@ -118,8 +118,8 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
LogicalSessionId sessionId = oplog.getSessionId().get();
TxnNumber txnNumber = oplog.getTxnNumber().get();
Timestamp ts = oplog.getTimestamp();
- BSONObj imageDoc = client.findOne(NamespaceString::kConfigImagesNamespace.ns(),
- BSON("_id" << sessionId.toBSON()) /*filter*/);
+ BSONObj imageDoc = client.findOne(NamespaceString::kConfigImagesNamespace,
+ BSON("_id" << sessionId.toBSON()));
if (imageDoc.isEmpty()) {
LOGV2_WARNING(5676402,
"Image lookup for a retryable findAndModify was not found",
@@ -170,8 +170,7 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
auto opTime = oplog.getPreImageOpTime() ? oplog.getPreImageOpTime().value()
: oplog.getPostImageOpTime().value();
- auto oplogDoc =
- client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), Query(), nullptr);
+ auto oplogDoc = client.findOne(NamespaceString::kRsOplogNamespace, opTime.asQuery());
uassert(40613,
str::stream() << "oplog no longer contains the complete write history of this "
diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp
index 90d6e386d56..27b0cf5c969 100644
--- a/src/mongo/db/query/query_request_helper.cpp
+++ b/src/mongo/db/query/query_request_helper.cpp
@@ -44,33 +44,17 @@ namespace mongo {
namespace query_request_helper {
namespace {
-
/**
- * Initializes options based on the value of the 'options' bit vector.
- *
- * This contains flags such as tailable, exhaust, and noCursorTimeout.
+ * Add the meta projection to this object if needed.
*/
-void initFromInt(int options, FindCommandRequest* findCommand) {
- bool tailable = (options & QueryOption_CursorTailable) != 0;
- bool awaitData = (options & QueryOption_AwaitData) != 0;
- if (awaitData) {
- findCommand->setAwaitData(true);
- }
- if (tailable) {
- findCommand->setTailable(true);
- }
-
- if ((options & QueryOption_NoCursorTimeout) != 0) {
- findCommand->setNoCursorTimeout(true);
- }
- if ((options & QueryOption_PartialResults) != 0) {
- findCommand->setAllowPartialResults(true);
+void addMetaProjection(FindCommandRequest* findCommand) {
+ if (findCommand->getShowRecordId()) {
+ addShowRecordIdMetaProj(findCommand);
}
}
-/**
- * Updates the projection object with a $meta projection for the showRecordId option.
- */
+} // namespace
+
void addShowRecordIdMetaProj(FindCommandRequest* findCommand) {
if (findCommand->getProjection()["$recordId"]) {
// There's already some projection on $recordId. Don't overwrite it.
@@ -84,136 +68,6 @@ void addShowRecordIdMetaProj(FindCommandRequest* findCommand) {
findCommand->setProjection(projBob.obj());
}
-/**
- * Add the meta projection to this object if needed.
- */
-void addMetaProjection(FindCommandRequest* findCommand) {
- if (findCommand->getShowRecordId()) {
- addShowRecordIdMetaProj(findCommand);
- }
-}
-
-Status initFullQuery(const BSONObj& top, FindCommandRequest* findCommand) {
- BSONObjIterator i(top);
-
- while (i.more()) {
- BSONElement e = i.next();
- StringData name = e.fieldNameStringData();
-
- if (name == "$orderby" || name == "orderby") {
- if (Object == e.type()) {
- findCommand->setSort(e.embeddedObject().getOwned());
- } else if (Array == e.type()) {
- findCommand->setSort(e.embeddedObject());
-
- // TODO: Is this ever used? I don't think so.
- // Quote:
- // This is for languages whose "objects" are not well ordered (JSON is well
- // ordered).
- // [ { a : ... } , { b : ... } ] -> { a : ..., b : ... }
- // note: this is slow, but that is ok as order will have very few pieces
- BSONObjBuilder b;
- char p[2] = "0";
-
- while (1) {
- BSONObj j = findCommand->getSort().getObjectField(p);
- if (j.isEmpty()) {
- break;
- }
- BSONElement e = j.firstElement();
- if (e.eoo()) {
- return Status(ErrorCodes::BadValue, "bad order array");
- }
- if (!e.isNumber()) {
- return Status(ErrorCodes::BadValue, "bad order array [2]");
- }
- b.append(e);
- (*p)++;
- if (!(*p <= '9')) {
- return Status(ErrorCodes::BadValue, "too many ordering elements");
- }
- }
-
- findCommand->setSort(b.obj());
- } else {
- return Status(ErrorCodes::BadValue, "sort must be object or array");
- }
- } else if (name.startsWith("$")) {
- name = name.substr(1); // chop first char
- if (name == "min") {
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
- }
- findCommand->setMin(e.embeddedObject().getOwned());
- } else if (name == "max") {
- if (!e.isABSONObj()) {
- return Status(ErrorCodes::BadValue, "$max must be a BSONObj");
- }
- findCommand->setMax(e.embeddedObject().getOwned());
- } else if (name == "hint") {
- if (e.isABSONObj()) {
- findCommand->setHint(e.embeddedObject().getOwned());
- } else if (String == e.type()) {
- findCommand->setHint(e.wrap());
- } else {
- return Status(ErrorCodes::BadValue,
- "$hint must be either a string or nested object");
- }
- } else if (name == "returnKey") {
- // Won't throw.
- if (e.trueValue()) {
- findCommand->setReturnKey(true);
- }
- } else if (name == "showDiskLoc") {
- // Won't throw.
- if (e.trueValue()) {
- findCommand->setShowRecordId(true);
- addShowRecordIdMetaProj(findCommand);
- }
- } else if (name == "maxTimeMS") {
- StatusWith<int> maxTimeMS = parseMaxTimeMS(e);
- if (!maxTimeMS.isOK()) {
- return maxTimeMS.getStatus();
- }
- findCommand->setMaxTimeMS(maxTimeMS.getValue());
- }
- }
- }
-
- return Status::OK();
-}
-
-Status initFindCommandRequest(int ntoskip,
- int queryOptions,
- const BSONObj& filter,
- const Query& querySettings,
- const BSONObj& proj,
- FindCommandRequest* findCommand) {
- if (!proj.isEmpty()) {
- findCommand->setProjection(proj.getOwned());
- }
- if (ntoskip) {
- findCommand->setSkip(ntoskip);
- }
-
- // Initialize flags passed as 'queryOptions' bit vector.
- initFromInt(queryOptions, findCommand);
-
- findCommand->setFilter(filter.getOwned());
- Status status = initFullQuery(querySettings.getFullSettingsDeprecated(), findCommand);
- if (!status.isOK()) {
- return status;
- }
-
- // It's not possible to specify readConcern in a legacy query message, so initialize it to
- // an empty readConcern object, ie. equivalent to `readConcern: {}`. This ensures that
- // mongos passes this empty readConcern to shards.
- findCommand->setReadConcern(BSONObj());
-
- return validateFindCommandRequest(*findCommand);
-}
-
-} // namespace
Status validateGetMoreCollectionName(StringData collectionName) {
if (collectionName.empty()) {
@@ -380,27 +234,6 @@ void validateCursorResponse(const BSONObj& outputAsBson) {
}
}
-//
-// Old QueryRequest parsing code: SOON TO BE DEPRECATED.
-//
-
-StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nssOrUuid,
- const BSONObj& filter,
- const Query& querySettings,
- const BSONObj& proj,
- int ntoskip,
- int queryOptions) {
- auto findCommand = std::make_unique<FindCommandRequest>(std::move(nssOrUuid));
-
- Status status = initFindCommandRequest(
- ntoskip, queryOptions, filter, querySettings, proj, findCommand.get());
- if (!status.isOK()) {
- return status;
- }
-
- return std::move(findCommand);
-}
-
StatusWith<BSONObj> asAggregationCommand(const FindCommandRequest& findCommand) {
BSONObjBuilder aggregationBuilder;
diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h
index 3c7cbc53b89..4044083d89f 100644
--- a/src/mongo/db/query/query_request_helper.h
+++ b/src/mongo/db/query/query_request_helper.h
@@ -144,19 +144,10 @@ TailableModeEnum getTailableMode(const FindCommandRequest& findCommand);
*/
void validateCursorResponse(const BSONObj& outputAsBson);
-//
-// Old parsing code: SOON TO BE DEPRECATED.
-//
-
/**
- * Parse the provided legacy query object and parameters to construct a FindCommandRequest.
+ * Updates the projection object with a $meta projection for the showRecordId option.
*/
-StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nsOrUuid,
- const BSONObj& filter,
- const Query& querySettings,
- const BSONObj& proj,
- int ntoskip,
- int queryOptions);
+void addShowRecordIdMetaProj(FindCommandRequest* findCommand);
} // namespace query_request_helper
} // namespace mongo
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index eba29f8c27e..88c41a2d821 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1548,64 +1548,6 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) {
ASSERT_FALSE(findCmd[FindCommandRequest::kAllowDiskUseFieldName].booleanSafe());
}
-TEST(QueryRequestTest, ParseFromLegacyQuery) {
- const auto kSkip = 1;
- const NamespaceString nss("test.testns");
-
- unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss,
- fromjson("{query: 1}") /*filter*/,
- Query().sort(BSON("sort" << 1)).hint(BSON("hint" << 1)),
- BSON("proj" << 1),
- kSkip,
- QueryOption_Exhaust)));
-
- ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss);
- ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{query: 1}"));
- ASSERT_BSONOBJ_EQ(findCommand->getProjection(), fromjson("{proj: 1}"));
- ASSERT_BSONOBJ_EQ(findCommand->getSort(), fromjson("{sort: 1}"));
- ASSERT_BSONOBJ_EQ(findCommand->getHint(), fromjson("{hint: 1}"));
- ASSERT_EQ(findCommand->getSkip(), boost::optional<int64_t>(kSkip));
- ASSERT_FALSE(findCommand->getNtoreturn());
- ASSERT_EQ(findCommand->getSingleBatch(), false);
- ASSERT_EQ(findCommand->getNoCursorTimeout(), false);
- ASSERT_EQ(findCommand->getTailable(), false);
- ASSERT_EQ(findCommand->getAllowPartialResults(), false);
-}
-
-TEST(QueryRequestTest, ParseFromLegacyQueryOplogReplayFlagAllowed) {
- const NamespaceString nss("test.testns");
- const BSONObj projectionObj{};
- const auto nToSkip = 0;
-
- // Test that parsing succeeds even if the oplog replay bit is set in the OP_QUERY message. This
- // flag may be set by old clients.
- auto options = QueryOption_OplogReplay_DEPRECATED;
- unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss, fromjson("{query: 1}"), Query().sort("sort", 1), projectionObj, nToSkip, options)));
-
- // Verify that if we reserialize the find command, the 'oplogReplay' field
- // does not appear.
- BSONObjBuilder bob;
- findCommand->serialize(BSONObj(), &bob);
- auto reserialized = bob.obj();
-
- ASSERT_BSONOBJ_EQ(reserialized,
- BSON("find"
- << "testns"
- << "filter" << BSON("query" << 1) << "sort" << BSON("sort" << 1)
- << "readConcern" << BSONObj{}));
-}
-
-TEST(QueryRequestTest, ParseFromLegacyQueryUnwrapped) {
- const NamespaceString nss("test.testns");
- unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss, fromjson("{foo: 1}"), Query(), BSONObj(), 0, QueryOption_Exhaust)));
-
- ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss);
- ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{foo: 1}"));
-}
-
TEST(QueryRequestHelperTest, ValidateResponseMissingFields) {
BSONObjBuilder builder;
ASSERT_THROWS_CODE(
diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
index 1b341ab4b11..08f95264959 100644
--- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
+++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
@@ -49,7 +49,7 @@ BSONObj getPersistedDefaultRWConcernDocument(OperationContext* opCtx) {
!MONGO_unlikely(failRWCDefaultsLookup.shouldFail()));
DBDirectClient client(opCtx);
- return client.findOne(NamespaceString::kConfigSettingsNamespace.toString(),
+ return client.findOne(NamespaceString::kConfigSettingsNamespace,
BSON("_id" << ReadWriteConcernDefaults::kPersistedDocumentId));
}
diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp
index 8a7159b5dde..d18f67cc39f 100644
--- a/src/mongo/db/repl/all_database_cloner.cpp
+++ b/src/mongo/db/repl/all_database_cloner.cpp
@@ -132,7 +132,8 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::getInitialSyncIdStage() {
if (wireVersion < WireVersion::RESUMABLE_INITIAL_SYNC)
return kContinueNormally;
auto initialSyncId = getClient()->findOne(
- ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), BSONObj{});
+ NamespaceString{ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace},
+ BSONObj{});
uassert(ErrorCodes::InitialSyncFailure,
"Cannot retrieve sync source initial sync ID",
!initialSyncId.isEmpty());
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index 6be1ed0b32b..50d012ab6c8 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -286,9 +286,13 @@ Status _checkPrecondition(OperationContext* opCtx,
}
DBDirectClient db(opCtx);
- // The preconditions come in "q: {{query: {...}, orderby: ..., etc.}}" format.
+ // The preconditions come in "q: {{query: {...}, orderby: ..., etc.}}" format. This format
+ // is no longer used either internally or over the wire in other contexts. We are using a
+ // legacy API from 'DBDirectClient' in order to parse this format and convert it into the
+ // corresponding find command.
auto preconditionQuery = Query::fromBSONDeprecated(preCondition["q"].Obj());
- BSONObj realres = db.findOne(nss.ns(), preconditionQuery.getFilter(), preconditionQuery);
+ auto cursor = db.query(nss, preconditionQuery.getFilter(), preconditionQuery, 1 /*limit*/);
+ BSONObj realres = cursor->more() ? cursor->nextSafe() : BSONObj{};
// Get collection default collation.
auto databaseHolder = DatabaseHolder::get(opCtx);
diff --git a/src/mongo/db/repl/initial_sync_base_cloner.cpp b/src/mongo/db/repl/initial_sync_base_cloner.cpp
index 0c00012b3f7..c08d102ca9c 100644
--- a/src/mongo/db/repl/initial_sync_base_cloner.cpp
+++ b/src/mongo/db/repl/initial_sync_base_cloner.cpp
@@ -128,7 +128,7 @@ Status InitialSyncBaseCloner::checkInitialSyncIdIsUnchanged() {
BSONObj initialSyncId;
try {
initialSyncId = getClient()->findOne(
- ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(),
+ NamespaceString{ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace},
BSONObj{});
} catch (DBException& e) {
if (ErrorCodes::isRetriableError(e)) {
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index df142f4a773..0a22256070e 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1951,11 +1951,10 @@ void setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
void initTimestampFromOplog(OperationContext* opCtx, const NamespaceString& oplogNss) {
DBDirectClient c(opCtx);
static const BSONObj reverseNaturalObj = BSON("$natural" << -1);
- BSONObj lastOp = c.findOne(oplogNss.ns(),
- BSONObj{},
- Query().sort(reverseNaturalObj),
- nullptr,
- QueryOption_SecondaryOk);
+ FindCommandRequest findCmd{oplogNss};
+ findCmd.setSort(reverseNaturalObj);
+ BSONObj lastOp =
+ c.findOne(std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
if (!lastOp.isEmpty()) {
LOGV2_DEBUG(21256, 1, "replSet setting last Timestamp");
diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp
index 9996da62a0e..b6f814be2de 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp
@@ -2990,7 +2990,7 @@ TEST_F(OplogApplierImplTxnTableTest, MultiApplyUpdatesTheTransactionTable) {
// The txnNum and optime of the only write were saved.
auto resultSingleDoc =
- client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << lsidSingle.toBSON()));
ASSERT_TRUE(!resultSingleDoc.isEmpty());
@@ -3002,7 +3002,7 @@ TEST_F(OplogApplierImplTxnTableTest, MultiApplyUpdatesTheTransactionTable) {
// The txnNum and optime of the write with the larger txnNum were saved.
auto resultDiffTxnDoc =
- client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << lsidDiffTxn.toBSON()));
ASSERT_TRUE(!resultDiffTxnDoc.isEmpty());
@@ -3014,7 +3014,7 @@ TEST_F(OplogApplierImplTxnTableTest, MultiApplyUpdatesTheTransactionTable) {
// The txnNum and optime of the write with the later optime were saved.
auto resultSameTxnDoc =
- client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << lsidSameTxn.toBSON()));
ASSERT_TRUE(!resultSameTxnDoc.isEmpty());
@@ -3026,7 +3026,7 @@ TEST_F(OplogApplierImplTxnTableTest, MultiApplyUpdatesTheTransactionTable) {
// There is no entry for the write with no txnNumber.
auto resultNoTxn =
- client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << lsidNoTxn.toBSON()));
ASSERT_TRUE(resultNoTxn.isEmpty());
}
diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
index 72da6429fd2..0743407a534 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
@@ -348,7 +348,7 @@ void checkTxnTable(OperationContext* opCtx,
boost::optional<repl::OpTime> expectedStartOpTime,
boost::optional<DurableTxnStateEnum> expectedState) {
DBDirectClient client(opCtx);
- auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()));
ASSERT_FALSE(result.isEmpty());
@@ -392,7 +392,7 @@ StatusWith<BSONObj> CollectionReader::next() {
bool docExists(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
DBDirectClient client(opCtx);
- auto result = client.findOne(nss.ns(), doc);
+ auto result = client.findOne(nss, doc);
return !result.isEmpty();
}
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index b3f06150854..3c57ed26f92 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -1042,10 +1042,11 @@ Status OplogFetcher::_checkTooStaleToSyncFromSource(const OpTime lastFetched,
BSONObj remoteFirstOplogEntry;
try {
// Query for the first oplog entry in the sync source's oplog.
- auto query = Query().sort(BSON("$natural" << 1));
+ FindCommandRequest findRequest{_nss};
+ findRequest.setSort(BSON("$natural" << 1));
// Since this function is called after the first batch, the exhaust stream has not been
// started yet. As a result, using the same connection is safe.
- remoteFirstOplogEntry = _conn->findOne(_nss.ns(), BSONObj{}, query);
+ remoteFirstOplogEntry = _conn->findOne(std::move(findRequest));
} catch (DBException& e) {
// If an error occurs with the query, throw an error.
return Status(ErrorCodes::TooStaleToSyncFromSource, e.reason());
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index c6184fb7259..f174d3c718c 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -66,22 +66,19 @@ int RollbackSourceImpl::getRollbackId() const {
}
BSONObj RollbackSourceImpl::getLastOperation() const {
- return _getConnection()->findOne(_collectionName,
- BSONObj{},
- Query().sort(BSON("$natural" << -1)),
- nullptr,
- QueryOption_SecondaryOk,
- ReadConcernArgs::kImplicitDefault);
+ FindCommandRequest findCmd{NamespaceString{_collectionName}};
+ findCmd.setSort(BSON("$natural" << -1));
+ findCmd.setReadConcern(ReadConcernArgs::kImplicitDefault);
+ return _getConnection()->findOne(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
}
BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& filter) const {
+ FindCommandRequest findCmd{nss};
+ findCmd.setFilter(filter);
+ findCmd.setReadConcern(ReadConcernArgs::kImplicitDefault);
return _getConnection()
- ->findOne(nss.toString(),
- filter,
- Query(),
- nullptr,
- QueryOption_SecondaryOk,
- ReadConcernArgs::kImplicitDefault)
+ ->findOne(std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryPreferred})
.getOwned();
}
diff --git a/src/mongo/db/repl/tenant_collection_cloner.cpp b/src/mongo/db/repl/tenant_collection_cloner.cpp
index 35a99338271..461a6beab8a 100644
--- a/src/mongo/db/repl/tenant_collection_cloner.cpp
+++ b/src/mongo/db/repl/tenant_collection_cloner.cpp
@@ -348,9 +348,10 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::createCollectionStage() {
// (createCollection/createIndex) don't get stamped with the fromTenantMigration field.
ON_BLOCK_EXIT([&opCtx] { tenantMigrationRecipientInfo(opCtx.get()) = boost::none; });
- auto fieldsToReturn = BSON("_id" << 1);
- _lastDocId = client.findOne(
- _existingNss->ns(), BSONObj{}, Query().sort(BSON("_id" << -1)), &fieldsToReturn);
+ FindCommandRequest findCmd{*_existingNss};
+ findCmd.setSort(BSON("_id" << -1));
+ findCmd.setProjection(BSON("_id" << 1));
+ _lastDocId = client.findOne(std::move(findCmd));
if (!_lastDocId.isEmpty()) {
// The collection is not empty. Skip creating indexes and resume cloning from the last
// document.
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index 16bda5828f4..a38a4357179 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -583,13 +583,12 @@ OpTime TenantMigrationRecipientService::Instance::_getDonorMajorityOpTime(
std::unique_ptr<mongo::DBClientConnection>& client) {
auto oplogOpTimeFields =
BSON(OplogEntry::kTimestampFieldName << 1 << OplogEntry::kTermFieldName << 1);
- auto majorityOpTimeBson =
- client->findOne(NamespaceString::kRsOplogNamespace.ns(),
- BSONObj{},
- Query().sort("$natural", -1),
- &oplogOpTimeFields,
- QueryOption_SecondaryOk,
- ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ FindCommandRequest findCmd{NamespaceString::kRsOplogNamespace};
+ findCmd.setSort(BSON("$natural" << -1));
+ findCmd.setProjection(oplogOpTimeFields);
+ findCmd.setReadConcern(ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ auto majorityOpTimeBson = client->findOne(
+ std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
uassert(5272003, "Found no entries in the remote oplog", !majorityOpTimeBson.isEmpty());
auto majorityOpTime = uassertStatusOK(OpTime::parseFromOplogEntry(majorityOpTimeBson));
@@ -878,13 +877,13 @@ void TenantMigrationRecipientService::Instance::_getStartOpTimesFromDonor(WithLo
const auto preparedState = DurableTxnState_serializer(DurableTxnStateEnum::kPrepared);
const auto inProgressState = DurableTxnState_serializer(DurableTxnStateEnum::kInProgress);
auto transactionTableOpTimeFields = BSON(SessionTxnRecord::kStartOpTimeFieldName << 1);
+ FindCommandRequest findCmd{NamespaceString::kSessionTransactionsTableNamespace};
+ findCmd.setFilter(BSON("state" << BSON("$in" << BSON_ARRAY(preparedState << inProgressState))));
+ findCmd.setSort(BSON(SessionTxnRecord::kStartOpTimeFieldName.toString() << 1));
+ findCmd.setProjection(transactionTableOpTimeFields);
+ findCmd.setReadConcern(ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
auto earliestOpenTransactionBson = _client->findOne(
- NamespaceString::kSessionTransactionsTableNamespace.ns(),
- BSON("state" << BSON("$in" << BSON_ARRAY(preparedState << inProgressState))),
- Query().sort(SessionTxnRecord::kStartOpTimeFieldName.toString(), 1),
- &transactionTableOpTimeFields,
- QueryOption_SecondaryOk,
- ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ std::move(findCmd), ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
LOGV2_DEBUG(4880602,
2,
"Transaction table entry for earliest transaction that was open at the read "
@@ -1923,13 +1922,11 @@ void TenantMigrationRecipientService::Instance::_compareRecipientAndDonorFCV() c
return;
}
- auto donorFCVbson =
- _client->findOne(NamespaceString::kServerConfigurationNamespace.ns(),
- BSON("_id" << multiversion::kParameterName),
- Query(),
- nullptr,
- QueryOption_SecondaryOk,
- ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ FindCommandRequest findCmd{NamespaceString::kServerConfigurationNamespace};
+ findCmd.setFilter(BSON("_id" << multiversion::kParameterName));
+ findCmd.setReadConcern(ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
+ auto donorFCVbson = _client->findOne(std::move(findCmd),
+ ReadPreferenceSetting{ReadPreference::SecondaryPreferred});
uassert(5382302, "FCV on donor not set", !donorFCVbson.isEmpty());
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index e41d49db40b..64224e03905 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -145,10 +145,10 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
const NamespaceString& nss,
const ShardKeyPattern& shardKeyPattern,
bool doSplitAtLower) {
- Query q;
+ FindCommandRequest findCmd{nss};
if (doSplitAtLower) {
- q.sort(shardKeyPattern.toBSON());
+ findCmd.setSort(shardKeyPattern.toBSON());
} else {
// need to invert shard key pattern to sort backwards
BSONObjBuilder r;
@@ -160,7 +160,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
r.append(e.fieldName(), -1 * e.number());
}
- q.sort(r.obj());
+ findCmd.setSort(r.obj());
}
DBDirectClient client(opCtx);
@@ -168,14 +168,12 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
BSONObj end;
if (doSplitAtLower) {
- // Splitting close to the lower bound means that the split point will be the
- // upper bound. Chunk range upper bounds are exclusive so skip a document to
- // make the lower half of the split end up with a single document.
- std::unique_ptr<DBClientCursor> cursor = client.query(nss,
- BSONObj{},
- q,
- 1, /* limit */
- 1 /* nToSkip */);
+ // Splitting close to the lower bound means that the split point will be the upper bound.
+ // Chunk range upper bounds are exclusive so skip a document to make the lower half of the
+ // split end up with a single document.
+ findCmd.setLimit(1);
+ findCmd.setSkip(1);
+ std::unique_ptr<DBClientCursor> cursor = client.find(std::move(findCmd));
uassert(40618,
str::stream() << "failed to initialize cursor during auto split due to "
@@ -186,7 +184,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
end = cursor->next().getOwned();
}
} else {
- end = client.findOne(nss.ns(), BSONObj{}, q);
+ end = client.findOne(std::move(findCmd));
}
if (end.isEmpty()) {
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index b89a6311fbf..1adb7a78174 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -159,7 +159,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
(std::string) "^" + pcrecpp::RE::QuoteMeta(dbName.toString()) + "$",
"i");
- auto dbDoc = client.findOne(DatabaseType::ConfigNS.ns(), queryBuilder.obj());
+ auto dbDoc = client.findOne(DatabaseType::ConfigNS, queryBuilder.obj());
auto const [primaryShardPtr, database] = [&] {
if (!dbDoc.isEmpty()) {
auto actualDb = uassertStatusOK(DatabaseType::fromBSON(dbDoc));
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index 603fd762ebf..7b1fd9da8a4 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -195,7 +195,7 @@ bool checkIfCollectionIsEmpty(OperationContext* opCtx, const NamespaceString& ns
// command doesn't just consult the cached metadata, which may not always be
// correct
DBDirectClient localClient(opCtx);
- return localClient.findOne(nss.ns(), BSONObj{}).isEmpty();
+ return localClient.findOne(nss, BSONObj{}).isEmpty();
}
int getNumShards(OperationContext* opCtx) {
diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h
index fcd75ced175..25736e6d6ba 100644
--- a/src/mongo/db/s/persistent_task_queue.h
+++ b/src/mongo/db/s/persistent_task_queue.h
@@ -221,16 +221,19 @@ bool PersistentTaskQueue<T>::empty(OperationContext* opCtx) const {
template <typename T>
TaskId PersistentTaskQueue<T>::_loadLastId(DBDirectClient& client) {
- auto fieldsToReturn = BSON("_id" << 1);
- auto maxId = client.findOne(
- _storageNss.toString(), BSONObj{}, Query().sort(BSON("_id" << -1)), &fieldsToReturn);
+ FindCommandRequest findCmd{_storageNss};
+ findCmd.setSort(BSON("_id" << -1));
+ findCmd.setProjection(BSON("_id" << 1));
+ auto maxId = client.findOne(std::move(findCmd));
return maxId.getField("_id").Long();
}
template <typename T>
typename boost::optional<typename BlockingTaskQueue<T>::Record>
PersistentTaskQueue<T>::_loadNextRecord(DBDirectClient& client) {
- auto bson = client.findOne(_storageNss.toString(), BSONObj{}, Query().sort("_id"));
+ FindCommandRequest findCmd{_storageNss};
+ findCmd.setSort(BSON("_id" << 1));
+ auto bson = client.findOne(std::move(findCmd));
boost::optional<typename PersistentTaskQueue<T>::Record> result;
diff --git a/src/mongo/db/s/resharding/resharding_agg_test.cpp b/src/mongo/db/s/resharding/resharding_agg_test.cpp
index 5d49e96409d..1c507297bd1 100644
--- a/src/mongo/db/s/resharding/resharding_agg_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_agg_test.cpp
@@ -154,7 +154,7 @@ public:
const Document& documentKey,
boost::optional<BSONObj> readConcern) {
DBDirectClient client(expCtx->opCtx);
- auto result = client.findOne(nss.ns(), documentKey.toBson());
+ auto result = client.findOne(nss, documentKey.toBson());
if (result.isEmpty()) {
return boost::none;
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index ae801434182..eed075dfe3a 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -225,8 +225,7 @@ public:
ReshardingCoordinatorDocument getCoordinatorDoc(OperationContext* opCtx) {
DBDirectClient client(opCtx);
- auto doc =
- client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), BSONObj{});
+ auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace, BSONObj{});
IDLParserErrorContext errCtx("reshardingCoordFromTest");
return ReshardingCoordinatorDocument::parse(errCtx, doc);
}
@@ -879,7 +878,7 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpEachTransition) {
// config.collections should not have the document with the old UUID.
std::vector<ChunkType> foundCollections;
auto collection =
- client.findOne(CollectionType::ConfigNS.ns(),
+ client.findOne(CollectionType::ConfigNS,
BSON(CollectionType::kNssFieldName << doc.getSourceNss().ns()));
ASSERT_EQUALS(collection.isEmpty(), false);
@@ -924,9 +923,8 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorFailsIfMigrationNo
// Check that reshardCollection keeps allowMigrations setting intact.
{
DBDirectClient client(opCtx);
- CollectionType collDoc(
- client.findOne(CollectionType::ConfigNS.ns(),
- BSON(CollectionType::kNssFieldName << _originalNss.ns())));
+ CollectionType collDoc(client.findOne(
+ CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName << _originalNss.ns())));
ASSERT_FALSE(collDoc.getAllowMigrations());
}
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 91eb6d31aa0..9116e731841 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -237,7 +237,7 @@ protected:
void readReshardingCoordinatorDocAndAssertMatchesExpected(
OperationContext* opCtx, ReshardingCoordinatorDocument expectedCoordinatorDoc) {
DBDirectClient client(opCtx);
- auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(),
+ auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace,
BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()));
auto coordinatorDoc = ReshardingCoordinatorDocument::parse(
@@ -320,7 +320,7 @@ protected:
const ReshardingCoordinatorDocument& expectedCoordinatorDoc) {
DBDirectClient client(opCtx);
CollectionType onDiskEntry(
- client.findOne(CollectionType::ConfigNS.ns(), BSON("_id" << _originalNss.ns())));
+ client.findOne(CollectionType::ConfigNS, BSON("_id" << _originalNss.ns())));
ASSERT_EQUALS(onDiskEntry.getAllowMigrations(), expectedCollType.getAllowMigrations());
@@ -379,7 +379,7 @@ protected:
void assertTemporaryCollectionCatalogEntryMatchesExpected(
OperationContext* opCtx, boost::optional<CollectionType> expectedCollType) {
DBDirectClient client(opCtx);
- auto doc = client.findOne(CollectionType::ConfigNS.ns(), BSON("_id" << _tempNss.ns()));
+ auto doc = client.findOne(CollectionType::ConfigNS, BSON("_id" << _tempNss.ns()));
if (!expectedCollType) {
ASSERT(doc.isEmpty());
return;
@@ -618,10 +618,10 @@ protected:
// Check that chunks and tags under the temp namespace have been removed
DBDirectClient client(opCtx);
- auto chunkDoc = client.findOne(ChunkType::ConfigNS.ns(), BSON("ns" << _tempNss.ns()));
+ auto chunkDoc = client.findOne(ChunkType::ConfigNS, BSON("ns" << _tempNss.ns()));
ASSERT(chunkDoc.isEmpty());
- auto tagDoc = client.findOne(TagsType::ConfigNS.ns(), BSON("ns" << _tempNss.ns()));
+ auto tagDoc = client.findOne(TagsType::ConfigNS, BSON("ns" << _tempNss.ns()));
ASSERT(tagDoc.isEmpty());
}
@@ -647,7 +647,7 @@ protected:
// Check that the entry is removed from config.reshardingOperations
DBDirectClient client(opCtx);
- auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(),
+ auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace,
BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()));
ASSERT(doc.isEmpty());
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
index 1e47f26b6c0..69cf7393006 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
@@ -200,7 +200,7 @@ boost::optional<ReshardingOplogApplierProgress> ReshardingOplogApplier::checkSto
OperationContext* opCtx, const ReshardingSourceId& id) {
DBDirectClient client(opCtx);
auto doc = client.findOne(
- NamespaceString::kReshardingApplierProgressNamespace.ns(),
+ NamespaceString::kReshardingApplierProgressNamespace,
BSON(ReshardingOplogApplierProgress::kOplogSourceIdFieldName << id.toBSON()));
if (doc.isEmpty()) {
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index aead93e4313..4459fdf5b22 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -413,10 +413,10 @@ TEST_F(ReshardingOplogApplierTest, ApplyBasicCrud) {
ASSERT_OK(future.getNoThrow());
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSONObj(), doc);
- doc = client.findOne(appliedToNs().ns(), BSON("_id" << 2));
+ doc = client.findOne(appliedToNs(), BSON("_id" << 2));
ASSERT_BSONOBJ_EQ(BSON("_id" << 2 << "x" << 1), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -489,7 +489,7 @@ TEST_F(ReshardingOplogApplierTest, InsertTypeOplogAppliedInMultipleBatches) {
DBDirectClient client(operationContext());
for (int x = 0; x < 19; x++) {
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << x));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << x));
ASSERT_BSONOBJ_EQ(BSON("_id" << x), doc);
}
@@ -527,7 +527,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorDuringFirstBatchApply) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::FailedToParse);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -569,13 +569,13 @@ TEST_F(ReshardingOplogApplierTest, ErrorDuringSecondBatchApply) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::FailedToParse);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), doc);
- doc = client.findOne(appliedToNs().ns(), BSON("_id" << 2));
+ doc = client.findOne(appliedToNs(), BSON("_id" << 2));
ASSERT_BSONOBJ_EQ(BSON("_id" << 2), doc);
- doc = client.findOne(appliedToNs().ns(), BSON("_id" << 3));
+ doc = client.findOne(appliedToNs(), BSON("_id" << 3));
ASSERT_BSONOBJ_EQ(BSON("_id" << 3), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -610,7 +610,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingFirstOplog) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::InternalError);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSONObj(), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -646,7 +646,7 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingFirstBatch) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::InternalError);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSONObj(), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -686,13 +686,13 @@ TEST_F(ReshardingOplogApplierTest, ErrorWhileIteratingSecondBatch) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::InternalError);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), doc);
- doc = client.findOne(appliedToNs().ns(), BSON("_id" << 2));
+ doc = client.findOne(appliedToNs(), BSON("_id" << 2));
ASSERT_BSONOBJ_EQ(BSON("_id" << 2), doc);
- doc = client.findOne(appliedToNs().ns(), BSON("_id" << 3));
+ doc = client.findOne(appliedToNs(), BSON("_id" << 3));
ASSERT_BSONOBJ_EQ(BSONObj(), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -727,7 +727,7 @@ TEST_F(ReshardingOplogApplierTest, ExecutorIsShutDown) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::ShutdownInProgress);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSONObj(), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
@@ -766,10 +766,10 @@ TEST_F(ReshardingOplogApplierTest, UnsupportedCommandOpsShouldError) {
ASSERT_EQ(future.getNoThrow(), ErrorCodes::OplogOperationUnsupported);
DBDirectClient client(operationContext());
- auto doc = client.findOne(appliedToNs().ns(), BSON("_id" << 1));
+ auto doc = client.findOne(appliedToNs(), BSON("_id" << 1));
ASSERT_BSONOBJ_EQ(BSON("_id" << 1), doc);
- doc = client.findOne(appliedToNs().ns(), BSON("_id" << 2));
+ doc = client.findOne(appliedToNs(), BSON("_id" << 2));
ASSERT_BSONOBJ_EQ(BSONObj(), doc);
auto progressDoc = ReshardingOplogApplier::checkStoredProgress(operationContext(), sourceId());
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 895f1ad225f..d8c0b8f8335 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -255,10 +255,10 @@ protected:
DBDirectClient client(operationContext());
// The same logical session entry may be inserted more than once by a test case, so use a
// $natural sort to find the most recently inserted entry.
- auto bsonOplog =
- client.findOne(NamespaceString::kRsOplogNamespace.ns(),
- BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON()),
- Query().sort(BSON("$natural" << -1)));
+ FindCommandRequest findCmd{NamespaceString::kRsOplogNamespace};
+ findCmd.setFilter(BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON()));
+ findCmd.setSort(BSON("$natural" << -1));
+ auto bsonOplog = client.findOne(std::move(findCmd));
ASSERT(!bsonOplog.isEmpty());
auto oplogEntry = repl::MutableOplogEntry::parse(bsonOplog).getValue();
ASSERT_EQ(oplogEntry.getTxnNumber().get(), txnNum);
@@ -267,7 +267,7 @@ protected:
ASSERT(oplogEntry.getOpType() == repl::OpTypeEnum::kNoop);
auto bsonTxn =
- client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON()));
ASSERT(!bsonTxn.isEmpty());
auto txn = SessionTxnRecord::parse(
@@ -284,7 +284,7 @@ protected:
DBDirectClient client(operationContext());
auto bsonOplog =
- client.findOne(NamespaceString::kRsOplogNamespace.ns(),
+ client.findOne(NamespaceString::kRsOplogNamespace,
BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON()));
ASSERT_BSONOBJ_EQ(bsonOplog, {});
@@ -295,7 +295,7 @@ protected:
const ReshardingSourceId& sourceId) {
DBDirectClient client(operationContext());
auto progressDoc = client.findOne(
- NamespaceString::kReshardingTxnClonerProgressNamespace.ns(),
+ NamespaceString::kReshardingTxnClonerProgressNamespace,
BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << sourceId.toBSON()));
if (progressDoc.isEmpty()) {
diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
index 639e9ac0a02..518f122762c 100644
--- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
@@ -163,7 +163,7 @@ public:
repl::OplogEntry getOplog(OperationContext* opCtx, const repl::OpTime& opTime) {
DBDirectClient client(opCtx);
- auto oplogBSON = client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery());
+ auto oplogBSON = client.findOne(NamespaceString::kRsOplogNamespace, opTime.asQuery());
ASSERT_FALSE(oplogBSON.isEmpty());
auto parseStatus = repl::OplogEntry::parse(oplogBSON);
@@ -1998,7 +1998,7 @@ TEST_F(SessionCatalogMigrationDestinationTest, MigratingKnownStmtWhileOplogTrunc
{
// Confirm that oplog is indeed empty.
DBDirectClient client(opCtx);
- auto result = client.findOne(NamespaceString::kRsOplogNamespace.ns(), BSONObj{});
+ auto result = client.findOne(NamespaceString::kRsOplogNamespace, BSONObj{});
ASSERT_TRUE(result.isEmpty());
}
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 2981f132087..31f006da764 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -61,10 +61,8 @@ boost::optional<repl::OplogEntry> forgeNoopEntryFromImageCollection(
DBDirectClient client(opCtx);
BSONObj imageObj =
- client.findOne(NamespaceString::kConfigImagesNamespace.ns(),
- BSON("_id" << retryableFindAndModifyOplogEntry.getSessionId()->toBSON()),
- Query(),
- nullptr);
+ client.findOne(NamespaceString::kConfigImagesNamespace,
+ BSON("_id" << retryableFindAndModifyOplogEntry.getSessionId()->toBSON()));
if (imageObj.isEmpty()) {
return boost::none;
}
@@ -124,8 +122,7 @@ boost::optional<repl::OplogEntry> fetchPrePostImageOplog(OperationContext* opCtx
auto opTime = opTimeToFetch.value();
DBDirectClient client(opCtx);
- auto oplogBSON =
- client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), Query(), nullptr);
+ auto oplogBSON = client.findOne(NamespaceString::kRsOplogNamespace, opTime.asQuery());
return uassertStatusOK(repl::OplogEntry::parse(oplogBSON));
}
@@ -437,8 +434,8 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
}
DBDirectClient client(opCtx);
- const auto& newWriteOplogDoc = client.findOne(
- NamespaceString::kRsOplogNamespace.ns(), nextOpTimeToFetch.asQuery(), Query(), nullptr);
+ const auto& newWriteOplogDoc =
+ client.findOne(NamespaceString::kRsOplogNamespace, nextOpTimeToFetch.asQuery());
uassert(40620,
str::stream() << "Unable to fetch oplog entry with opTime: "
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 601a1624208..6f12df7d24d 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -227,7 +227,7 @@ void ValidationBehaviorsShardCollection::verifyCanCreateShardKeyIndex(
uassert(ErrorCodes::InvalidOptions,
"Please create an index that starts with the proposed shard key before "
"sharding the collection",
- _localClient->findOne(nss.ns(), BSONObj{}).isEmpty());
+ _localClient->findOne(nss, BSONObj{}).isEmpty());
}
void ValidationBehaviorsShardCollection::createShardKeyIndex(
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index e182d0f4431..9084f8ef170 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -156,7 +156,7 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
setupCollection(kToNss, KeyPattern(BSON("x" << 1)), originalToChunks);
// Get FROM collection document and chunks
- auto fromDoc = client.findOne(CollectionType::ConfigNS.ns(), fromCollQuery);
+ auto fromDoc = client.findOne(CollectionType::ConfigNS, fromCollQuery);
CollectionType fromCollection(fromDoc);
std::vector<BSONObj> fromChunks;
findN(client,
@@ -172,10 +172,10 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
opCtx, fromCollType, kToNss, ShardingCatalogClient::kMajorityWriteConcern);
// Check that the FROM config.collections entry has been deleted
- ASSERT(client.findOne(CollectionType::ConfigNS.ns(), fromCollQuery).isEmpty());
+ ASSERT(client.findOne(CollectionType::ConfigNS, fromCollQuery).isEmpty());
// Get TO collection document and chunks
- auto toDoc = client.findOne(CollectionType::ConfigNS.ns(), toCollQuery);
+ auto toDoc = client.findOne(CollectionType::ConfigNS, toCollQuery);
CollectionType toCollection(toDoc);
std::vector<BSONObj> toChunks;
findN(client,
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index 43fe5c3e48a..2d1187bc215 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -142,7 +142,7 @@ protected:
void waitUntilCoordinatorDocIsPresent() {
DBDirectClient dbClient(operationContext());
- while (dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), BSONObj{})
+ while (dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace, BSONObj{})
.isEmpty())
;
}
@@ -156,14 +156,13 @@ protected:
do {
doc = TransactionCoordinatorDocument::parse(
IDLParserErrorContext("dummy"),
- dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(),
- BSONObj{}));
+ dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace, BSONObj{}));
} while (!doc.getDecision());
}
void waitUntilNoCoordinatorDocIsPresent() {
DBDirectClient dbClient(operationContext());
- while (!dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), BSONObj{})
+ while (!dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace, BSONObj{})
.isEmpty())
;
}
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index bb6832314b7..f814b11f52c 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -168,7 +168,7 @@ repl::OpTime persistParticipantListBlocking(
// exists. Note that this is best-effort: the document may have been deleted or manually
// changed since the update above ran.
const auto doc = client.findOne(
- NamespaceString::kTransactionCoordinatorsNamespace.toString(),
+ NamespaceString::kTransactionCoordinatorsNamespace,
BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51025,
str::stream() << "While attempting to write participant list "
@@ -400,7 +400,7 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
// exists. Note that this is best-effort: the document may have been deleted or manually
// changed since the update above ran.
const auto doc = client.findOne(
- NamespaceString::kTransactionCoordinatorsNamespace.ns(),
+ NamespaceString::kTransactionCoordinatorsNamespace,
BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51026,
str::stream() << "While attempting to write decision "
@@ -602,7 +602,7 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx,
// exists. Note that this is best-effort: the document may have been deleted or manually
// changed since the update above ran.
const auto doc = client.findOne(
- NamespaceString::kTransactionCoordinatorsNamespace.toString(),
+ NamespaceString::kTransactionCoordinatorsNamespace,
BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51027,
str::stream() << "While attempting to delete document for " << lsid.getId() << ':'
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index f28244b594c..53dc2af4998 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -154,7 +154,7 @@ ActiveTransactionHistory fetchActiveTransactionHistory(OperationContext* opCtx,
// field has been set, bumping the global lock acquisition to an IX. That upconvert would
// require a flow control ticket to be obtained.
FlowControl::Bypass flowControlBypass(opCtx);
- auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace,
BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()));
if (result.isEmpty()) {
return boost::none;
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
index 9b849d051d8..fb71b3fd24e 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp
@@ -93,8 +93,70 @@ std::pair<rpc::UniqueReply, DBClientBase*> MockDBClientConnection::runCommandWit
_failed.store(true);
throw;
}
-} // namespace mongo
+}
+
+namespace {
+int nToSkipFromResumeAfter(const BSONObj& resumeAfter) {
+ if (resumeAfter.isEmpty()) {
+ return 0;
+ }
+
+ auto nElt = resumeAfter["n"];
+ if (!nElt || !nElt.isNumber()) {
+ return 0;
+ }
+
+ return nElt.numberInt();
+}
+} // namespace
+
+std::unique_ptr<DBClientCursor> MockDBClientConnection::bsonArrayToCursor(BSONArray results,
+ int nToSkip,
+ bool provideResumeToken,
+ int batchSize) {
+ BSONArray resultsInCursor;
+
+    // Resuming a prior query: skip the first 'nToSkip' already-returned results.
+ if (nToSkip != 0) {
+ BSONObjIterator iter(results);
+ BSONArrayBuilder builder;
+ auto numExamined = 0;
+
+ while (iter.more()) {
+ numExamined++;
+
+ if (numExamined < nToSkip + 1) {
+ iter.next();
+ continue;
+ }
+
+ builder.append(iter.next().Obj());
+ }
+ resultsInCursor = BSONArray(builder.obj());
+ } else {
+        // No resume point specified: return all results (default).
+ resultsInCursor = BSONArray(results.copy());
+ }
+
+ return std::make_unique<DBClientMockCursor>(
+ this, resultsInCursor, provideResumeToken, batchSize);
+}
+std::unique_ptr<DBClientCursor> MockDBClientConnection::find(
+ FindCommandRequest findRequest, const ReadPreferenceSetting& readPref) {
+ checkConnection();
+ try {
+ int nToSkip = nToSkipFromResumeAfter(findRequest.getResumeAfter());
+ bool provideResumeToken = findRequest.getRequestResumeToken();
+ int batchSize = findRequest.getBatchSize().value_or(0);
+ BSONArray results = _remoteServer->find(_remoteServerInstanceID, findRequest);
+ return bsonArrayToCursor(std::move(results), nToSkip, provideResumeToken, batchSize);
+ } catch (const DBException&) {
+ _failed.store(true);
+ throw;
+ }
+ return nullptr;
+}
std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
const NamespaceStringOrUUID& nsOrUuid,
@@ -127,9 +189,7 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
auto nToSkip = 0;
BSONObj querySettingsAsBSON = querySettings.getFullSettingsDeprecated();
if (querySettingsAsBSON.hasField("$_resumeAfter")) {
- if (querySettingsAsBSON["$_resumeAfter"].Obj().hasField("n")) {
- nToSkip = querySettingsAsBSON["$_resumeAfter"]["n"].numberInt();
- }
+ nToSkip = nToSkipFromResumeAfter(querySettingsAsBSON.getField("$_resumeAfter").Obj());
}
bool provideResumeToken = false;
@@ -137,39 +197,14 @@ std::unique_ptr<mongo::DBClientCursor> MockDBClientConnection::query(
provideResumeToken = true;
}
- // Resume query.
- if (nToSkip != 0) {
- BSONObjIterator iter(result);
- BSONArrayBuilder builder;
- auto numExamined = 0;
-
- while (iter.more()) {
- numExamined++;
-
- if (numExamined < nToSkip + 1) {
- iter.next();
- continue;
- }
-
- builder.append(iter.next().Obj());
- }
- resultsInCursor = BSONArray(builder.obj());
- } else {
- // Yield all results instead (default).
- resultsInCursor = BSONArray(result.copy());
- }
- std::unique_ptr<mongo::DBClientCursor> cursor;
- cursor.reset(new DBClientMockCursor(
- this, BSONArray(resultsInCursor), provideResumeToken, batchSize));
- return cursor;
+ return bsonArrayToCursor(std::move(result), nToSkip, provideResumeToken, batchSize);
} catch (const mongo::DBException&) {
_failed.store(true);
throw;
}
- std::unique_ptr<mongo::DBClientCursor> nullPtr;
- return nullPtr;
+ return nullptr;
}
mongo::ConnectionString::ConnectionType MockDBClientConnection::type() const {
diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h
index 3160f34e060..4c44296d045 100644
--- a/src/mongo/dbtests/mock/mock_dbclient_connection.h
+++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h
@@ -120,6 +120,9 @@ public:
using DBClientBase::runCommandWithTarget;
std::pair<rpc::UniqueReply, DBClientBase*> runCommandWithTarget(OpMsgRequest request) override;
+ std::unique_ptr<DBClientCursor> find(FindCommandRequest findRequest,
+ const ReadPreferenceSetting& readPref) override;
+
std::unique_ptr<mongo::DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& filter = BSONObj{},
@@ -203,6 +206,11 @@ public:
private:
void checkConnection() override;
+ std::unique_ptr<DBClientCursor> bsonArrayToCursor(BSONArray results,
+ int nToSkip,
+ bool provideResumeToken,
+ int batchSize);
+
MockRemoteDBServer::InstanceID _remoteServerInstanceID;
MockRemoteDBServer* const _remoteServer;
uint64_t _sockCreationTime;
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
index 7b7031aa9a4..90ea117509d 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp
@@ -195,16 +195,9 @@ BSONObj MockRemoteDBServer::project(projection_executor::ProjectionExecutor* pro
return projectedDoc.toBson().getOwned();
}
-mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
- const NamespaceStringOrUUID& nsOrUuid,
- const BSONObj& filter,
- const Query& querySettings,
- int limit,
- int nToSkip,
- const BSONObj* fieldsToReturn,
- int queryOptions,
- int batchSize,
- boost::optional<BSONObj> readConcernObj) {
+mongo::BSONArray MockRemoteDBServer::findImpl(InstanceID id,
+ const NamespaceStringOrUUID& nsOrUuid,
+ BSONObj projection) {
checkIfUp(id);
if (_delayMilliSec > 0) {
@@ -214,8 +207,8 @@ mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
checkIfUp(id);
std::unique_ptr<projection_executor::ProjectionExecutor> projectionExecutor;
- if (fieldsToReturn) {
- projectionExecutor = createProjectionExecutor(*fieldsToReturn);
+ if (!projection.isEmpty()) {
+ projectionExecutor = createProjectionExecutor(projection);
}
scoped_spinlock sLock(_lock);
_queryCount++;
@@ -230,6 +223,25 @@ mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
return BSONArray(result.obj());
}
+mongo::BSONArray MockRemoteDBServer::find(MockRemoteDBServer::InstanceID id,
+ const FindCommandRequest& findRequest) {
+ return findImpl(id, findRequest.getNamespaceOrUUID(), findRequest.getProjection());
+}
+
+mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
+ const NamespaceStringOrUUID& nsOrUuid,
+ const BSONObj& filter,
+ const Query& querySettings,
+ int limit,
+ int nToSkip,
+ const BSONObj* fieldsToReturn,
+ int queryOptions,
+ int batchSize,
+ boost::optional<BSONObj> readConcernObj) {
+ BSONObj projection = fieldsToReturn ? *fieldsToReturn : BSONObj{};
+ return findImpl(id, nsOrUuid, std::move(projection));
+}
+
mongo::ConnectionString::ConnectionType MockRemoteDBServer::type() const {
return mongo::ConnectionString::ConnectionType::kCustom;
}
diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h
index 7ae742f769c..591b506a170 100644
--- a/src/mongo/dbtests/mock/mock_remote_db_server.h
+++ b/src/mongo/dbtests/mock/mock_remote_db_server.h
@@ -35,6 +35,7 @@
#include "mongo/client/connection_string.h"
#include "mongo/client/query.h"
#include "mongo/db/jsobj.h"
+#include "mongo/db/query/find_command_gen.h"
#include "mongo/rpc/unique_message.h"
#include "mongo/stdx/unordered_map.h"
#include "mongo/util/concurrency/spin_lock.h"
@@ -162,6 +163,14 @@ public:
//
rpc::UniqueReply runCommand(InstanceID id, const OpMsgRequest& request);
+ /**
+ * Finds documents from this mock server according to 'findRequest'.
+ */
+ mongo::BSONArray find(InstanceID id, const FindCommandRequest& findRequest);
+
+ /**
+ * Legacy query API: New callers should use 'find()' rather than this method.
+ */
mongo::BSONArray query(InstanceID id,
const NamespaceStringOrUUID& nsOrUuid,
const BSONObj& filter,
@@ -234,6 +243,13 @@ private:
*/
BSONObj project(projection_executor::ProjectionExecutor* projectionExecutor, const BSONObj& o);
+ /**
+ * Logic shared between 'find()' and 'query()'. This can go away when the legacy 'query()' API
+ * is removed.
+ */
+ mongo::BSONArray findImpl(InstanceID id,
+ const NamespaceStringOrUUID& nsOrUuid,
+ BSONObj projection);
typedef stdx::unordered_map<std::string, std::shared_ptr<CircularBSONIterator>> CmdToReplyObj;
typedef stdx::unordered_map<std::string, std::vector<BSONObj>> MockDataMgr;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 458180e9aa8..4fa533f350a 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -278,13 +278,17 @@ public:
}
void run() {
const char* ns = "unittests.querytests.BoundedKey";
+ NamespaceString namespaceStr{ns};
insert(ns, BSON("a" << 1));
BSONObjBuilder a;
a.appendMaxKey("$lt");
BSONObj limit = a.done();
- ASSERT(!_client.findOne(ns, BSON("a" << limit)).isEmpty());
+ ASSERT(!_client.findOne(namespaceStr, BSON("a" << limit)).isEmpty());
ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
- ASSERT(!_client.findOne(ns, BSON("a" << limit), Query().hint(BSON("a" << 1))).isEmpty());
+ FindCommandRequest findCmd{namespaceStr};
+ findCmd.setFilter(BSON("a" << limit));
+ findCmd.setHint(BSON("a" << 1));
+ ASSERT(!_client.findOne(std::move(findCmd)).isEmpty());
}
};
@@ -298,11 +302,10 @@ public:
insert(ns, BSON("a" << 1));
insert(ns, BSON("a" << 2));
insert(ns, BSON("a" << 3));
- unique_ptr<DBClientCursor> cursor =
- _client.query(NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, 0, 2);
+ FindCommandRequest findRequest{NamespaceString{ns}};
+ findRequest.setBatchSize(2);
+ std::unique_ptr<DBClientCursor> cursor = _client.find(findRequest);
long long cursorId = cursor->getCursorId();
- cursor->decouple();
- cursor.reset();
{
// Check that a cursor has been registered with the global cursor manager, and has
@@ -312,9 +315,11 @@ public:
ASSERT_EQUALS(std::uint64_t(2), pinnedCursor.getCursor()->nReturnedSoFar());
}
- cursor = _client.getMore(ns, cursorId);
- ASSERT(cursor->more());
- ASSERT_EQUALS(3, cursor->next().getIntField("a"));
+ int counter = 0;
+ while (cursor->more()) {
+ ASSERT_EQUALS(++counter, cursor->next().getIntField("a"));
+ }
+ ASSERT_EQ(counter, 3);
}
};
@@ -335,8 +340,9 @@ public:
}
// Create a cursor on the collection, with a batch size of 200.
- unique_ptr<DBClientCursor> cursor =
- _client.query(NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, 0, 200);
+ FindCommandRequest findRequest{NamespaceString{ns}};
+ findRequest.setBatchSize(200);
+ auto cursor = _client.find(std::move(findRequest));
// Count 500 results, spanning a few batches of documents.
for (int i = 0; i < 500; ++i) {
@@ -381,8 +387,9 @@ public:
}
// Create a cursor on the collection, with a batch size of 200.
- unique_ptr<DBClientCursor> cursor =
- _client.query(NamespaceString(ns), BSONObj{}, Query(), 0, 0, nullptr, 0, 200);
+ FindCommandRequest findRequest{NamespaceString{ns}};
+ findRequest.setBatchSize(200);
+ auto cursor = _client.find(std::move(findRequest));
CursorId cursorId = cursor->getCursorId();
// Count 500 results, spanning a few batches of documents.
@@ -423,23 +430,24 @@ public:
_client.dropCollection(ns);
}
- void testLimit(int limit) {
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), limit)->itcount(),
- limit);
+ void testLimit(int limit, int expectedCount) {
+ FindCommandRequest findRequest{NamespaceString{ns}};
+ findRequest.setLimit(limit);
+ ASSERT_EQUALS(_client.find(std::move(findRequest))->itcount(), expectedCount);
}
+
void run() {
- for (int i = 0; i < 1000; i++)
+ const int collSize = 1000;
+ for (int i = 0; i < collSize; i++)
insert(ns, BSON(GENOID << "i" << i));
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 1)->itcount(), 1);
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 10)->itcount(), 10);
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 101)->itcount(), 101);
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 999)->itcount(), 999);
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 1000)->itcount(),
- 1000);
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 1001)->itcount(),
- 1000);
- ASSERT_EQUALS(_client.query(NamespaceString(ns), BSONObj{}, Query(), 0)->itcount(), 1000);
+ testLimit(1, 1);
+ testLimit(10, 10);
+ testLimit(101, 101);
+ testLimit(collSize - 1, collSize - 1);
+ testLimit(collSize, collSize);
+ testLimit(collSize + 1, collSize);
+ testLimit(collSize + 10, collSize);
}
};
@@ -850,11 +858,12 @@ public:
}
void run() {
const char* ns = "unittests.querytests._UnderscoreNs";
- ASSERT(_client.findOne(ns, BSONObj{}).isEmpty());
+ NamespaceString nss{ns};
+ ASSERT(_client.findOne(nss, BSONObj{}).isEmpty());
auto response = _client.insertAcknowledged(ns, {BSON("a" << 1)});
ASSERT_OK(getStatusFromWriteCommandReply(response));
ASSERT_EQ(1, response["n"].Int());
- ASSERT_EQUALS(1, _client.findOne(ns, BSONObj{}).getIntField("a"));
+ ASSERT_EQUALS(1, _client.findOne(nss, BSONObj{}).getIntField("a"));
}
};
@@ -865,10 +874,12 @@ public:
}
void run() {
const char* ns = "unittests.querytests.EmptyFieldSpec";
+ NamespaceString nss{ns};
_client.insert(ns, BSON("a" << 1));
- ASSERT(!_client.findOne(ns, BSONObj{}).isEmpty());
- BSONObj empty;
- ASSERT(!_client.findOne(ns, BSONObj{}, Query(), &empty).isEmpty());
+ ASSERT(!_client.findOne(nss, BSONObj{}).isEmpty());
+ FindCommandRequest findCmd{nss};
+ findCmd.setProjection(BSONObj{});
+ ASSERT(!_client.findOne(std::move(findCmd)).isEmpty());
}
};
@@ -879,10 +890,11 @@ public:
}
void run() {
const char* ns = "unittests.querytests.Ne";
+ NamespaceString nss{ns};
_client.insert(ns, fromjson("{a:[1,2]}"));
- ASSERT(_client.findOne(ns, fromjson("{a:{$ne:1}}")).isEmpty());
+ ASSERT(_client.findOne(nss, fromjson("{a:{$ne:1}}")).isEmpty());
BSONObj spec = fromjson("{a:{$ne:1,$ne:2}}");
- ASSERT(_client.findOne(ns, spec).isEmpty());
+ ASSERT(_client.findOne(nss, spec).isEmpty());
}
};
@@ -893,8 +905,9 @@ public:
}
void run() {
const char* ns = "unittests.querytests.NestedNe";
+ NamespaceString nss{ns};
_client.insert(ns, fromjson("{a:[{b:1},{b:2}]}"));
- ASSERT(_client.findOne(ns, fromjson("{'a.b':{$ne:1}}")).isEmpty());
+ ASSERT(_client.findOne(nss, fromjson("{'a.b':{$ne:1}}")).isEmpty());
}
};
@@ -905,10 +918,11 @@ public:
}
void run() {
const char* ns = "unittests.querytests.NumericEmbedded";
+ NamespaceString nss{ns};
_client.insert(ns, BSON("a" << BSON("b" << 1)));
- ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty());
+ ASSERT(!_client.findOne(nss, BSON("a" << BSON("b" << 1.0))).isEmpty());
ASSERT_OK(dbtests::createIndex(&_opCtx, ns, BSON("a" << 1)));
- ASSERT(!_client.findOne(ns, BSON("a" << BSON("b" << 1.0))).isEmpty());
+ ASSERT(!_client.findOne(nss, BSON("a" << BSON("b" << 1.0))).isEmpty());
}
};
@@ -991,9 +1005,10 @@ public:
}
void run() {
const char* ns = "unittests.querytests.SubobjectInArray";
+ NamespaceString nss{ns};
_client.insert(ns, fromjson("{a:[{b:{c:1}}]}"));
- ASSERT(!_client.findOne(ns, BSON("a.b.c" << 1)).isEmpty());
- ASSERT(!_client.findOne(ns, fromjson("{'a.c':null}")).isEmpty());
+ ASSERT(!_client.findOne(nss, BSON("a.b.c" << 1)).isEmpty());
+ ASSERT(!_client.findOne(nss, fromjson("{'a.c':null}")).isEmpty());
}
};
@@ -1329,26 +1344,24 @@ public:
b.append("z", 17);
_client.insert(ns(), b.obj());
}
- ASSERT_EQUALS(17, _client.findOne(ns(), BSONObj{})["z"].number());
+ ASSERT_EQUALS(17, _client.findOne(nss(), BSONObj{})["z"].number());
{
BSONObjBuilder b;
b.appendSymbol("x", "eliot");
- ASSERT_EQUALS(17, _client.findOne(ns(), b.obj())["z"].number());
+ ASSERT_EQUALS(17, _client.findOne(nss(), b.obj())["z"].number());
}
ASSERT_EQUALS(17,
_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("x"
- << "eliot"),
- Query())["z"]
+ << "eliot"))["z"]
.number());
ASSERT_OK(dbtests::createIndex(&_opCtx, ns(), BSON("x" << 1)));
ASSERT_EQUALS(17,
_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("x"
- << "eliot"),
- Query())["z"]
+ << "eliot"))["z"]
.number());
}
};
diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp
index 38274e3021f..7013ed413f5 100644
--- a/src/mongo/dbtests/repltests.cpp
+++ b/src/mongo/dbtests/repltests.cpp
@@ -178,7 +178,7 @@ protected:
return "local.oplog.rs";
}
BSONObj one(const BSONObj& query = BSONObj()) const {
- return _client.findOne(ns(), query);
+ return _client.findOne(nss(), query);
}
void checkOne(const BSONObj& o) const {
check(o, one(o));
@@ -375,10 +375,10 @@ public:
b.append("a", 1);
b.appendTimestamp("t");
_client.insert(ns(), b.done());
- date_ = _client.findOne(ns(), BSON("a" << 1)).getField("t").date();
+ date_ = _client.findOne(nss(), BSON("a" << 1)).getField("t").date();
}
void check() const {
- BSONObj o = _client.findOne(ns(), BSON("a" << 1));
+ BSONObj o = _client.findOne(nss(), BSON("a" << 1));
ASSERT(Date_t{} != o.getField("t").date());
ASSERT_EQUALS(date_, o.getField("t").date());
}
@@ -466,10 +466,10 @@ public:
b.append("_id", 1);
b.appendTimestamp("t");
_client.update(ns(), BSON("_id" << 1), b.done());
- date_ = _client.findOne(ns(), BSON("_id" << 1)).getField("t").date();
+ date_ = _client.findOne(nss(), BSON("_id" << 1)).getField("t").date();
}
void check() const {
- BSONObj o = _client.findOne(ns(), BSON("_id" << 1));
+ BSONObj o = _client.findOne(nss(), BSON("_id" << 1));
ASSERT(Date_t{} != o.getField("t").date());
ASSERT_EQUALS(date_, o.getField("t").date());
}
@@ -494,8 +494,8 @@ public:
}
void check() const {
ASSERT_EQUALS(2, count());
- ASSERT(!_client.findOne(ns(), q_).isEmpty());
- ASSERT(!_client.findOne(ns(), u_).isEmpty());
+ ASSERT(!_client.findOne(nss(), q_).isEmpty());
+ ASSERT(!_client.findOne(nss(), u_).isEmpty());
}
void reset() const {
deleteAll(ns());
@@ -518,8 +518,8 @@ public:
}
void check() const {
ASSERT_EQUALS(2, count());
- ASSERT(!_client.findOne(ns(), q_).isEmpty());
- ASSERT(!_client.findOne(ns(), u_).isEmpty());
+ ASSERT(!_client.findOne(nss(), q_).isEmpty());
+ ASSERT(!_client.findOne(nss(), u_).isEmpty());
}
void reset() const {
deleteAll(ns());
@@ -731,7 +731,7 @@ public:
}
void check() const {
ASSERT_EQUALS(2, count());
- ASSERT(!_client.findOne(ns(), ou_).isEmpty());
+ ASSERT(!_client.findOne(nss(), ou_).isEmpty());
}
void reset() const {
deleteAll(ns());
@@ -751,7 +751,7 @@ public:
}
void check() const {
ASSERT_EQUALS(1, count());
- ASSERT(!_client.findOne(ns(), ou_).isEmpty());
+ ASSERT(!_client.findOne(nss(), ou_).isEmpty());
}
void reset() const {
deleteAll(ns());
diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp
index 474402e99f8..713f58e0d39 100644
--- a/src/mongo/dbtests/updatetests.cpp
+++ b/src/mongo/dbtests/updatetests.cpp
@@ -149,9 +149,13 @@ public:
}
protected:
- const char* ns() {
+ const char* ns() const {
return "unittests.updatetests.SetBase";
}
+
+ NamespaceString nss() const {
+ return NamespaceString{ns()};
+ }
};
class SetNum : public SetBase {
@@ -159,7 +163,7 @@ public:
void run() {
_client.insert(ns(), BSON("a" << 1));
_client.update(ns(), BSON("a" << 1), BSON("$set" << BSON("a" << 4)));
- ASSERT(!_client.findOne(ns(), BSON("a" << 4)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 4)).isEmpty());
}
};
@@ -175,10 +179,9 @@ public:
BSON("$set" << BSON("a"
<< "c")));
ASSERT(!_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("a"
- << "c"),
- Query())
+ << "c"))
.isEmpty());
}
};
@@ -195,10 +198,9 @@ public:
BSON("$set" << BSON("a"
<< "cd")));
ASSERT(!_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("a"
- << "cd"),
- Query())
+ << "cd"))
.isEmpty());
}
};
@@ -210,7 +212,7 @@ public:
BSON("a"
<< "b"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 5)));
- ASSERT(!_client.findOne(ns(), BSON("a" << 5)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 5)).isEmpty());
}
};
@@ -221,7 +223,7 @@ public:
BSON("a"
<< "bcd"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 5.0)));
- ASSERT(!_client.findOne(ns(), BSON("a" << 5.0)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 5.0)).isEmpty());
}
};
@@ -231,11 +233,11 @@ public:
// Try with upsert false first.
_client.insert(ns(), BSONObj() /* empty document */);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), false);
- ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(_client.findOne(nss(), BSON("a" << 1)).isEmpty());
// Then with upsert true.
_client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), true);
- ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(_client.findOne(nss(), BSON("a" << 1)).isEmpty());
}
};
@@ -244,11 +246,11 @@ public:
void run() {
// Try with upsert false first.
_client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), false);
- ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(_client.findOne(nss(), BSON("a" << 1)).isEmpty());
// Then with upsert true.
_client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 1)), true);
- ASSERT(!_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 1)).isEmpty());
}
};
@@ -257,11 +259,11 @@ public:
void run() {
// Try with upsert false first.
_client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("b" << 1)), false);
- ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(_client.findOne(nss(), BSON("a" << 1)).isEmpty());
// Then with upsert true.
_client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("b" << 1)), true);
- ASSERT(!_client.findOne(ns(), BSON("a" << 1 << "b" << 1)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 1 << "b" << 1)).isEmpty());
}
};
@@ -270,11 +272,11 @@ public:
void run() {
// Try with upsert false first.
_client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("a" << 2)), false);
- ASSERT(_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(_client.findOne(nss(), BSON("a" << 1)).isEmpty());
// Then with upsert true.
_client.update(ns(), BSON("a" << 1), BSON("$setOnInsert" << BSON("a" << 2)), true);
- ASSERT(!_client.findOne(ns(), BSON("a" << 2)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 2)).isEmpty());
}
};
@@ -284,7 +286,7 @@ public:
BSONObj res = fromjson("{'_id':0, a:1}");
_client.insert(ns(), res);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("b" << 1)));
- ASSERT(_client.findOne(ns(), BSON("a" << 1)).woCompare(res) == 0);
+ ASSERT(_client.findOne(nss(), BSON("a" << 1)).woCompare(res) == 0);
}
};
@@ -293,7 +295,7 @@ public:
void run() {
_client.insert(ns(), BSON("a" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$setOnInsert" << BSON("a" << 2)));
- ASSERT(!_client.findOne(ns(), BSON("a" << 1)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 1)).isEmpty());
}
};
@@ -305,14 +307,14 @@ public:
BSONObj{} /*filter*/,
BSON("$set" << BSON("a" << 1) << "$setOnInsert" << BSON("b" << 2)),
false);
- ASSERT(_client.findOne(ns(), BSON("a" << 1 << "b" << 2)).isEmpty());
+ ASSERT(_client.findOne(nss(), BSON("a" << 1 << "b" << 2)).isEmpty());
// Then with upsert true.
_client.update(ns(),
BSONObj{} /*filter*/,
BSON("$set" << BSON("a" << 1) << "$setOnInsert" << BSON("b" << 2)),
true);
- ASSERT(!_client.findOne(ns(), BSON("a" << 1 << "b" << 2)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a" << 1 << "b" << 2)).isEmpty());
}
};
@@ -326,7 +328,7 @@ public:
_client.insert(ns(), initial);
_client.update(
ns(), initial, BSON("$setOnInsert" << BSON("a.b" << 1) << "$set" << BSON("d" << 1)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), initial), final);
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), initial), final);
}
};
@@ -335,9 +337,9 @@ public:
void run() {
_client.insert(ns(), fromjson("{a:{b:4}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$inc" << BSON("a.b" << 10)));
- ASSERT(!_client.findOne(ns(), BSON("a.b" << 14)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a.b" << 14)).isEmpty());
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << 55)));
- ASSERT(!_client.findOne(ns(), BSON("a.b" << 55)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a.b" << 55)).isEmpty());
}
};
@@ -350,10 +352,9 @@ public:
BSON("$set" << BSON("a.b"
<< "llll")));
ASSERT(!_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("a.b"
- << "llll"),
- Query())
+ << "llll"))
.isEmpty());
}
};
@@ -367,10 +368,9 @@ public:
BSON("$set" << BSON("a.b"
<< "lllll")));
ASSERT(_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("a.b"
- << "lllll"),
- Query())
+ << "lllll"))
.woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) == 0);
}
};
@@ -384,10 +384,9 @@ public:
BSON("$set" << BSON("a.b"
<< "lllll")));
ASSERT(_client
- .findOne(ns(),
+ .findOne(nss(),
BSON("a.b"
- << "lllll"),
- Query())
+ << "lllll"))
.woCompare(fromjson("{'_id':0,a:{b:'lllll'}}")) == 0);
}
};
@@ -400,10 +399,9 @@ public:
BSONObj{} /*filter*/,
BSON("$set" << BSON("a.b"
<< "lllll")));
- ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(),
+ ASSERT_EQUALS(mutablebson::unordered(_client.findOne(nss(),
BSON("a.b"
- << "lllll"),
- Query())),
+ << "lllll"))),
mutablebson::unordered(fromjson("{'_id':0,a:{b:'lllll',c:4}}")));
}
};
@@ -413,7 +411,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$inc" << BSON("f" << 3.0)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,f:3}")) ==
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,f:3}")) ==
0);
}
};
@@ -460,7 +458,7 @@ public:
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(
ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("f.g.h" << 3.0 << "f.g.a" << 2.0)));
- ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), BSONObj{} /*filter*/)),
+ ASSERT_EQUALS(mutablebson::unordered(_client.findOne(nss(), BSONObj{} /*filter*/)),
mutablebson::unordered(fromjson("{'_id':0,f:{g:{a:2,h:3}}}")));
}
};
@@ -470,7 +468,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("f.g.h.b" << 3.0 << "f.g.a.b" << 2.0)));
- ASSERT_EQUALS(mutablebson::unordered(_client.findOne(ns(), BSONObj{} /*filter*/)),
+ ASSERT_EQUALS(mutablebson::unordered(_client.findOne(nss(), BSONObj{} /*filter*/)),
mutablebson::unordered(fromjson("{'_id':0,f:{g:{a:{b:2},h:{b:3}}}}")));
}
};
@@ -483,7 +481,7 @@ public:
BSONObj{} /*filter*/,
BSON("$set" << BSON("z.0"
<< "a")));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,z:['a','b']}"));
}
};
@@ -493,7 +491,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:1}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << 1)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:1}")) ==
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:1}")) ==
0);
}
};
@@ -503,7 +501,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 2 << "a.b" << 1)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/), fromjson("{'_id':0}"));
}
};
@@ -513,7 +511,7 @@ public:
_client.insert(ns(), fromjson("{'_id':0,a:{b:2}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << 2 << "a.b" << 1)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:2}}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:2}}")) ==
0);
}
};
@@ -524,7 +522,7 @@ public:
_client.insert(ns(), fromjson("{'_id':0,a:{b:2}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a" << BSON("c" << 2))));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{c:2}}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{c:2}}")) ==
0);
}
};
@@ -541,7 +539,7 @@ class UpsertMissingEmbedded : public SetBase {
public:
void run() {
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b" << 1)), true);
- ASSERT(!_client.findOne(ns(), BSON("a.b" << 1)).isEmpty());
+ ASSERT(!_client.findOne(nss(), BSON("a.b" << 1)).isEmpty());
}
};
@@ -550,7 +548,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:[1]}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[1,5]}"));
}
};
@@ -560,7 +558,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:1}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:1}")) ==
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:1}")) ==
0);
}
};
@@ -572,8 +570,9 @@ public:
_client.update(ns(),
BSONObj{} /*filter*/,
BSON("$set" << BSON("a" << 1) << "$push" << BSON("a" << 5)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1]}")) ==
- 0);
+ ASSERT(
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1]}")) ==
+ 0);
}
};
@@ -582,7 +581,8 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[5]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
+ fromjson("{'_id':0,a:[5]}"));
}
};
@@ -591,8 +591,9 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:[]}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 5)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[5]}")) ==
- 0);
+ ASSERT(
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[5]}")) ==
+ 0);
}
};
@@ -601,7 +602,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a.b" << 5)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/)
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/)
.woCompare(fromjson("{'_id':0,a:{b:[5]}}")) == 0);
}
};
@@ -613,7 +614,7 @@ public:
_client.update(ns(),
BSONObj{} /*filter*/,
BSON("$set" << BSON("a" << BSONObj()) << "$push" << BSON("a.b" << 5)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0}")) == 0);
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0}")) == 0);
}
};
@@ -624,7 +625,7 @@ public:
_client.update(ns(),
BSONObj{} /*filter*/,
BSON("$push" << BSON("a" << 4) << "$push" << BSON("a" << 5)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[]}")) ==
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[]}")) ==
0);
}
};
@@ -635,7 +636,7 @@ public:
_client.insert(ns(), fromjson("{'_id':0,a:{b:4}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b.c" << 4.0)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) ==
0);
}
};
@@ -646,7 +647,7 @@ public:
_client.insert(ns(), fromjson("{'_id':0,a:{b:4}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << 4.0)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) ==
0);
}
};
@@ -658,7 +659,7 @@ public:
// { $push : { a : { $each : [ 2, 3 ] } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[1,2,3]}"));
}
};
@@ -670,7 +671,7 @@ public:
// { $push : { a : { $each : [ 1, 2, 3 ] } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[1,2,3]}"));
}
};
@@ -682,7 +683,7 @@ public:
// { $push : { a : { $each : [ 2 ] , $slice : -3 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -3);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[1,2]}"));
}
};
@@ -694,7 +695,7 @@ public:
// { $push : { a : { $each : [ 2 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[1,2]}"));
}
};
@@ -706,7 +707,7 @@ public:
// { $push : { a : { $each : [ 2 , 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(2 << 3) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -718,7 +719,7 @@ public:
// { $push : { a : { $each : [ 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -730,7 +731,7 @@ public:
// { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[]}"));
}
};
@@ -741,7 +742,7 @@ public:
// { $push : { a : { $each : [ 3 ] , $slice : 0 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 0);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[]}"));
}
};
@@ -752,7 +753,7 @@ public:
// { $push : { a : { $each : [ 1 , 2 ] , $slice : -3 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2) << "$slice" << -3);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[1,2]}"));
}
};
@@ -764,7 +765,7 @@ public:
// { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -776,7 +777,8 @@ public:
// { $push : { a : { $each : [ 1 ] , $slice : -3 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1) << "$slice" << -3);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/), fromjson("{'_id':0,a:[1]}"));
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
+ fromjson("{'_id':0,a:[1]}"));
}
};
@@ -787,7 +789,7 @@ public:
// { $push : { a : { $each : [ 1 , 2 , 3 ] , $slice : -2 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(1 << 2 << 3) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -801,7 +803,7 @@ public:
BSONObj objB = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1);
_client.update(
ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "b" << objB)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,5],b:[6]}"));
}
};
@@ -813,7 +815,7 @@ public:
// { $push : { a : { $each : [ 5 ] , $slice : -2 } , { b : 4 } }
BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "b" << 4)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,5],b:[3,4]}"));
}
};
@@ -827,7 +829,7 @@ public:
BSONObj other = BSON("$each" << BSON_ARRAY(6) << "$slice" << -1);
_client.update(
ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "a" << other)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/)
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/)
.woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0);
}
};
@@ -839,7 +841,7 @@ public:
// { $push : { a : { $each : [ 5 ] , $slice : -2 } , { a : 4 } } }
BSONObj objA = BSON("$each" << BSON_ARRAY(5) << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << objA << "a" << 4)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/)
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/)
.woCompare(fromjson("{'_id':0,a:[1],b:[3]}")) == 0);
}
};
@@ -852,7 +854,7 @@ public:
BSONObj pushObj = BSON("$each" << 3 << "$slice" << -2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
0);
}
};
@@ -865,7 +867,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << BSON_ARRAY(-2));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
0);
}
};
@@ -878,7 +880,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << 2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
0);
}
};
@@ -892,7 +894,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.1);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
0);
}
};
@@ -904,7 +906,7 @@ public:
// { $push : { a : { $each : [ 3 ], $slice : -2.0 } } }
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$slice" << -2.0);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
- ASSERT_BSONOBJ_EQ(_client.findOne(ns(), BSONObj{} /*filter*/),
+ ASSERT_BSONOBJ_EQ(_client.findOne(nss(), BSONObj{} /*filter*/),
fromjson("{'_id':0,a:[2,3]}"));
}
};
@@ -917,7 +919,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(3) << "$xxxx" << 2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("a" << pushObj)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:[1,2]}")) ==
0);
}
};
@@ -949,10 +951,14 @@ protected:
BOTTOMK_DESC = 3
};
- const char* ns() {
+ const char* ns() const {
return "unittest.updatetests.PushSortBase";
}
+ NamespaceString nss() const {
+ return NamespaceString{ns()};
+ }
+
void setParams(const BSONArray& fields,
const BSONArray& values,
const BSONArray& sort,
@@ -1007,8 +1013,8 @@ protected:
void check(BSONObj expected) {
std::cout << expected.toString() << std::endl;
- std::cout << _client.findOne(ns(), BSONObj{} /*filter*/) << std::endl;
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/).woCompare(expected) == 0);
+ std::cout << _client.findOne(nss(), BSONObj{} /*filter*/) << std::endl;
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/).woCompare(expected) == 0);
}
private:
@@ -1046,7 +1052,7 @@ public:
case TOPK_ASC:
case BOTTOMK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1054,7 +1060,7 @@ public:
case TOPK_DESC:
case BOTTOMK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1091,7 +1097,7 @@ public:
case TOPK_ASC:
case BOTTOMK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1099,7 +1105,7 @@ public:
case TOPK_DESC:
case BOTTOMK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1135,14 +1141,14 @@ public:
switch (i) {
case TOPK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1182,7 +1188,7 @@ public:
BSONObj expected;
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[]}");
ASSERT_BSONOBJ_EQ(result, expected);
}
@@ -1215,7 +1221,7 @@ public:
BSONObj expected;
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[]}");
ASSERT_BSONOBJ_EQ(result, expected);
}
@@ -1252,7 +1258,7 @@ public:
case TOPK_ASC:
case BOTTOMK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1260,7 +1266,7 @@ public:
case TOPK_DESC:
case BOTTOMK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1297,14 +1303,14 @@ public:
switch (i) {
case TOPK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1347,7 +1353,7 @@ public:
case TOPK_ASC:
case BOTTOMK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:1,b:1},{a:2,b:2}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1355,7 +1361,7 @@ public:
case TOPK_DESC:
case BOTTOMK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1392,14 +1398,14 @@ public:
switch (i) {
case TOPK_ASC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:3,b:3}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
case TOPK_DESC:
_client.update(ns(), BSONObj{} /*filter*/, getUpdate(i));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
expected = fromjson("{'_id':0,x:[{a:2,b:2},{a:1,b:1}]}");
ASSERT_BSONOBJ_EQ(result, expected);
break;
@@ -1560,7 +1566,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("a..d" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
@@ -1568,28 +1574,28 @@ public:
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("a." << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.b:1} } } }
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON(".b" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {.:1} } } }
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("." << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
// { $push : { x : { $each : [ {a:3} ], $slice:-2, $sort : {'':1} } } }
pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1603,7 +1609,7 @@ public:
BSONObj pushObj =
BSON("$each" << BSON_ARRAY(3) << "$slice" << -2 << "$sort" << BSON("a" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1617,7 +1623,7 @@ public:
BSONObj pushObj =
BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort" << 2);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1631,7 +1637,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << 2 << "$sort"
<< BSON("a" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1645,7 +1651,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.1 << "$sort"
<< BSON("a" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1659,7 +1665,7 @@ public:
<< BSON("a" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1673,7 +1679,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2.0 << "$sort"
<< BSON_ARRAY(2 << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1687,7 +1693,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 3)) << "$slice" << -2 << "$sort"
<< BSON("a" << 10));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1701,7 +1707,7 @@ public:
<< "$slice" << -2.0);
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
BSONObj expected = fromjson("{'_id':0,x:[{a:2},{a:3}]}");
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1715,7 +1721,7 @@ public:
BSONObj pushObj = BSON("$each" << BSON_ARRAY(BSON("a" << 2)) << "$sort" << BSON("a" << 1)
<< "$sort" << BSON("a" << 1));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$push" << BSON("x" << pushObj)));
- BSONObj result = _client.findOne(ns(), BSONObj{} /*filter*/);
+ BSONObj result = _client.findOne(nss(), BSONObj{} /*filter*/);
ASSERT_BSONOBJ_EQ(result, expected);
}
};
@@ -1726,7 +1732,7 @@ public:
_client.insert(ns(), fromjson("{'_id':0,a:{b:4}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$inc" << BSON("a" << 4.0)));
ASSERT(
- _client.findOne(ns(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) ==
+ _client.findOne(nss(), BSONObj{} /*filter*/).woCompare(fromjson("{'_id':0,a:{b:4}}")) ==
0);
}
};
@@ -1736,7 +1742,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:{b:{}}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.c" << 4.0)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/)
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/)
.woCompare(fromjson("{'_id':0,a:{b:{},c:4}}")) == 0);
}
};
@@ -1746,7 +1752,7 @@ public:
void run() {
_client.insert(ns(), fromjson("{'_id':0,a:{b:{}}}"));
_client.update(ns(), BSONObj{} /*filter*/, BSON("$set" << BSON("a.b.f" << 4.0)));
- ASSERT(_client.findOne(ns(), BSONObj{} /*filter*/)
+ ASSERT(_client.findOne(nss(), BSONObj{} /*filter*/)
.woCompare(fromjson("{'_id':0,a:{b:{f:4}}}")) == 0);
}
};
@@ -1758,10 +1764,10 @@ public:
_client.insert(ns(), fromjson("{'_id':0}"));
_client.update(ns(), BSONObj{} /*filter*/, fromjson("{$set:{'a.b':4}}"));
ASSERT_BSONOBJ_EQ(fromjson("{'_id':0,a:{b:4}}"),
- _client.findOne(ns(), BSONObj{} /*filter*/));
+ _client.findOne(nss(), BSONObj{} /*filter*/));
ASSERT_BSONOBJ_EQ(
fromjson("{'_id':0,a:{b:4}}"),
- _client.findOne(ns(), fromjson("{'a.b':4}"))); // make sure the index works
+ _client.findOne(nss(), fromjson("{'a.b':4}"))); // make sure the index works
}
};
@@ -1770,7 +1776,10 @@ public:
void run() {
_client.insert(ns(), BSON("_id" << 55 << "i" << 5));
_client.update(ns(), BSON("i" << 5), BSON("i" << 6));
- ASSERT(!_client.findOne(ns(), BSON("_id" << 55), Query().hint(BSON("_id" << 1))).isEmpty());
+ FindCommandRequest findCmd{nss()};
+ findCmd.setFilter(BSON("_id" << 55));
+ findCmd.setHint(BSON("_id" << 1));
+ ASSERT(!_client.findOne(std::move(findCmd)).isEmpty());
}
};
@@ -1779,7 +1788,7 @@ public:
void run() {
_client.update(ns(), BSONObj(), BSON("_id" << 52307 << "$set" << BSON("q" << 3)), true);
ASSERT_BSONOBJ_EQ(fromjson("{'_id':52307,$set:{q:3}}"),
- _client.findOne(ns(), BSON("_id" << 52307)));
+ _client.findOne(nss(), BSON("_id" << 52307)));
}
};
@@ -1788,7 +1797,7 @@ public:
void run() {
_client.insert(ns(), BSON("a" << 5));
_client.update(ns(), BSON("a" << 5), fromjson("{$set:{b:null}}"));
- ASSERT_EQUALS(jstNULL, _client.findOne(ns(), BSON("a" << 5)).getField("b").type());
+ ASSERT_EQUALS(jstNULL, _client.findOne(nss(), BSON("a" << 5)).getField("b").type());
}
};
@@ -1801,7 +1810,7 @@ public:
_client.update(ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "a" << 1 << "x" << BSON("b" << 1 << "c" << 1) << "x"
<< BSONObj() << "z" << 5),
- _client.findOne(ns(), BSONObj{} /*filter*/));
+ _client.findOne(nss(), BSONObj{} /*filter*/));
}
};
@@ -1815,7 +1824,7 @@ public:
ns(), BSONObj(), BSON("$set" << BSON("x.b" << 1 << "x.c" << 1 << "x.d" << 1)));
ASSERT_BSONOBJ_EQ(BSON("_id" << 0 << "x" << BSON("b" << 1 << "c" << 1 << "d" << 1) << "x"
<< BSONObj() << "x" << BSONObj()),
- _client.findOne(ns(), BSONObj{} /*filter*/));
+ _client.findOne(nss(), BSONObj{} /*filter*/));
}
};
@@ -1825,7 +1834,7 @@ public:
_client.insert(ns(), BSON("_id" << 0 << "x" << 5));
_client.update(ns(), BSONObj(), BSON("$set" << BSON("a" << 1 << "b" << 1 << "x" << 10)));
ASSERT_EQUALS(mutablebson::unordered(BSON("_id" << 0 << "a" << 1 << "b" << 1 << "x" << 10)),
- mutablebson::unordered(_client.findOne(ns(), BSONObj{} /*filter*/)));
+ mutablebson::unordered(_client.findOne(nss(), BSONObj{} /*filter*/)));
}
};
@@ -1844,7 +1853,7 @@ protected:
}
BSONObj findOne() {
- return _client.findOne(ns(), BSONObj{} /*filter*/);
+ return _client.findOne(NamespaceString{ns()}, BSONObj{} /*filter*/);
}
void test(const char* initial, const char* mod, const char* after) {
diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp
index d843b095aef..e27b0e40a75 100644
--- a/src/mongo/shell/encrypted_dbclient_base.cpp
+++ b/src/mongo/shell/encrypted_dbclient_base.cpp
@@ -624,12 +624,10 @@ std::shared_ptr<SymmetricKey> EncryptedDBClientBase::getDataKey(const UUID& uuid
std::shared_ptr<SymmetricKey> EncryptedDBClientBase::getDataKeyFromDisk(const UUID& uuid) {
NamespaceString fullNameNS = getCollectionNS();
- BSONObj dataKeyObj = _conn->findOne(fullNameNS.ns(),
- BSON("_id" << uuid),
- Query(),
- nullptr,
- 0,
- repl::ReadConcernArgs::kImplicitDefault);
+ FindCommandRequest findCmd{fullNameNS};
+ findCmd.setFilter(BSON("_id" << uuid));
+ findCmd.setReadConcern(repl::ReadConcernArgs::kImplicitDefault);
+ BSONObj dataKeyObj = _conn->findOne(std::move(findCmd));
if (dataKeyObj.isEmpty()) {
uasserted(ErrorCodes::BadValue, "Invalid keyID.");
}