author    Irina Yatsenko <irina.yatsenko@mongodb.com>  2021-08-16 15:35:51 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-08-23 19:44:53 +0000
commit    df329d8f46e1485dd5d70379f9c48bf4175f0d5a (patch)
tree      80adf0442b021bdd689d59697a6b85ebf4dab24d /src/mongo/db
parent    5cf8a293567989fcc970fb21cde4a1af111c8b58 (diff)
download  mongo-df329d8f46e1485dd5d70379f9c48bf4175f0d5a.tar.gz
SERVER-58670 Tighten up what kind of BSON the 'Query' type can be wrapped around
This refactor includes: remove dead code from the 'Query' type and reduce its public interface; split the query argument of the query/update/remove methods into a filter BSON and settings (the latter still passed around as the 'Query' type); remove the Query(string) constructors; remove most callers of 'Query(const BSONObj&)'; and replace the public 'Query(const BSONObj&)' constructor and the 'Query.obj' member with an explicit factory method and a getter.
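In practice the change moves callers from one wrapped 'Query' argument to a filter-plus-settings pair. A minimal sketch of the new convention (illustrative only; 'nss' and 'opCtx' stand in for whatever the call site has in scope):

    DBDirectClient client(opCtx);
    // Before: filter and options were folded into a single 'Query' object, e.g.
    //   client.query(nss, Query(BSON("key" << "one")).sort(BSON("min" << 1)));
    // After: the filter BSON is passed on its own, and 'Query' carries only settings.
    auto cursor = client.query(nss,
                               BSON("key" << "one") /* filter */,
                               Query().sort(BSON("min" << 1)) /* querySettings */);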
Diffstat (limited to 'src/mongo/db')
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_d.cpp | 4
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_d.h | 2
-rw-r--r-- src/mongo/db/auth/authz_manager_external_state_local.h | 4
-rw-r--r-- src/mongo/db/cloner.cpp | 16
-rw-r--r-- src/mongo/db/cloner.h | 1
-rw-r--r-- src/mongo/db/commands/dbcommands_d.cpp | 5
-rw-r--r-- src/mongo/db/commands/user_management_commands.cpp | 2
-rw-r--r-- src/mongo/db/dbdirectclient.cpp | 5
-rw-r--r-- src/mongo/db/dbdirectclient.h | 3
-rw-r--r-- src/mongo/db/exhaust_cursor_currentop_integration_test.cpp | 67
-rw-r--r-- src/mongo/db/op_observer_impl_test.cpp | 6
-rw-r--r-- src/mongo/db/ops/write_ops_retryability.cpp | 7
-rw-r--r-- src/mongo/db/persistent_task_store.h | 28
-rw-r--r-- src/mongo/db/persistent_task_store_test.cpp | 52
-rw-r--r-- src/mongo/db/query/query_request_helper.cpp | 33
-rw-r--r-- src/mongo/db/query/query_request_helper.h | 4
-rw-r--r-- src/mongo/db/query/query_request_test.cpp | 42
-rw-r--r-- src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp | 2
-rw-r--r-- src/mongo/db/repl/all_database_cloner.cpp | 2
-rw-r--r-- src/mongo/db/repl/all_database_cloner_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/apply_ops.cpp | 4
-rw-r--r-- src/mongo/db/repl/collection_cloner.cpp | 6
-rw-r--r-- src/mongo/db/repl/initial_sync_base_cloner.cpp | 3
-rw-r--r-- src/mongo/db/repl/oplog.cpp | 7
-rw-r--r-- src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp | 4
-rw-r--r-- src/mongo/db/repl/oplog_fetcher.cpp | 39
-rw-r--r-- src/mongo/db/repl/oplog_fetcher.h | 6
-rw-r--r-- src/mongo/db/repl/oplog_fetcher_test.cpp | 19
-rw-r--r-- src/mongo/db/repl/oplog_interface_remote.cpp | 1
-rw-r--r-- src/mongo/db/repl/primary_only_service.cpp | 2
-rw-r--r-- src/mongo/db/repl/replication_recovery.cpp | 3
-rw-r--r-- src/mongo/db/repl/roll_back_local_operations_test.cpp | 3
-rw-r--r-- src/mongo/db/repl/rollback_impl.cpp | 8
-rw-r--r-- src/mongo/db/repl/rollback_source_impl.cpp | 5
-rw-r--r-- src/mongo/db/repl/tenant_collection_cloner.cpp | 25
-rw-r--r-- src/mongo/db/repl/tenant_migration_recipient_service.cpp | 9
-rw-r--r-- src/mongo/db/repl/tenant_migration_recipient_service_test.cpp | 2
-rw-r--r-- src/mongo/db/repl/transaction_oplog_application.cpp | 4
-rw-r--r-- src/mongo/db/rs_local_client.cpp | 10
-rw-r--r-- src/mongo/db/s/chunk_splitter.cpp | 3
-rw-r--r-- src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp | 2
-rw-r--r-- src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp | 2
-rw-r--r-- src/mongo/db/s/create_collection_coordinator.cpp | 2
-rw-r--r-- src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp | 4
-rw-r--r-- src/mongo/db/s/migration_util.cpp | 26
-rw-r--r-- src/mongo/db/s/migration_util.h | 2
-rw-r--r-- src/mongo/db/s/persistent_task_queue.h | 8
-rw-r--r-- src/mongo/db/s/range_deletion_util.cpp | 18
-rw-r--r-- src/mongo/db/s/range_deletion_util_test.cpp | 2
-rw-r--r-- src/mongo/db/s/recoverable_critical_section_service.cpp | 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp | 3
-rw-r--r-- src/mongo/db/s/resharding/resharding_coordinator_test.cpp | 18
-rw-r--r-- src/mongo/db/s/resharding/resharding_data_copy_util.cpp | 6
-rw-r--r-- src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp | 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_donor_service_test.cpp | 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_manual_cleanup.cpp | 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_applier.cpp | 2
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp | 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp | 41
-rw-r--r-- src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp | 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_recipient_service_test.cpp | 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_txn_cloner.cpp | 4
-rw-r--r-- src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp | 13
-rw-r--r-- src/mongo/db/s/resharding_destined_recipient_test.cpp | 2
-rw-r--r-- src/mongo/db/s/session_catalog_migration_destination_test.cpp | 2
-rw-r--r-- src/mongo/db/s/session_catalog_migration_source.cpp | 12
-rw-r--r-- src/mongo/db/s/shard_key_util.cpp | 2
-rw-r--r-- src/mongo/db/s/shard_metadata_util.cpp | 19
-rw-r--r-- src/mongo/db/s/shard_metadata_util_test.cpp | 11
-rw-r--r-- src/mongo/db/s/sharding_ddl_util_test.cpp | 28
-rw-r--r-- src/mongo/db/s/transaction_coordinator_test.cpp | 7
-rw-r--r-- src/mongo/db/s/transaction_coordinator_util.cpp | 8
-rw-r--r-- src/mongo/db/s/vector_clock_shard_server_test.cpp | 4
-rw-r--r-- src/mongo/db/session_catalog_mongod.cpp | 18
-rw-r--r-- src/mongo/db/transaction_participant.cpp | 5
-rw-r--r-- src/mongo/db/transaction_participant_retryable_writes_test.cpp | 8
-rw-r--r-- src/mongo/db/vector_clock_mongod.cpp | 4
77 files changed, 381 insertions, 373 deletions
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 66f8e74215d..e8dc4cdda7a 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -60,12 +60,12 @@ AuthzManagerExternalStateMongod::makeAuthzSessionExternalState(AuthorizationMana
Status AuthzManagerExternalStateMongod::query(
OperationContext* opCtx,
const NamespaceString& collectionName,
- const BSONObj& query,
+ const BSONObj& filter,
const BSONObj& projection,
const std::function<void(const BSONObj&)>& resultProcessor) {
try {
DBDirectClient client(opCtx);
- client.query(resultProcessor, collectionName, query, &projection);
+ client.query(resultProcessor, collectionName, filter, Query(), &projection);
return Status::OK();
} catch (const DBException& e) {
return e.toStatus();
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.h b/src/mongo/db/auth/authz_manager_external_state_d.h
index d1fedc3be8a..62bd7a08fce 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.h
+++ b/src/mongo/db/auth/authz_manager_external_state_d.h
@@ -62,7 +62,7 @@ public:
const BSONObj& query) final;
Status query(OperationContext* opCtx,
const NamespaceString& collectionName,
- const BSONObj& query,
+ const BSONObj& filter,
const BSONObj& projection,
const std::function<void(const BSONObj&)>& resultProcessor) final;
};
diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h
index 31fd864684e..4f2807b6b57 100644
--- a/src/mongo/db/auth/authz_manager_external_state_local.h
+++ b/src/mongo/db/auth/authz_manager_external_state_local.h
@@ -101,12 +101,12 @@ public:
const BSONObj& query) = 0;
/**
- * Finds all documents matching "query" in "collectionName". For each document returned,
+ * Finds all documents matching "filter" in "collectionName". For each document returned,
* calls the function resultProcessor on it.
*/
virtual Status query(OperationContext* opCtx,
const NamespaceString& collectionName,
- const BSONObj& query,
+ const BSONObj& filter,
const BSONObj& projection,
const std::function<void(const BSONObj&)>& resultProcessor) = 0;
diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp
index e0ff1859e51..41d4ae540b3 100644
--- a/src/mongo/db/cloner.cpp
+++ b/src/mongo/db/cloner.cpp
@@ -226,21 +226,20 @@ struct Cloner::Fun {
time_t saveLast;
};
-/* copy the specified collection
+/**
+ * Copy the specified collection.
*/
void Cloner::_copy(OperationContext* opCtx,
const std::string& toDBName,
const NamespaceString& nss,
const BSONObj& from_opts,
const BSONObj& from_id_index,
- Query query,
DBClientBase* conn) {
LOGV2_DEBUG(20414,
2,
- "\t\tcloning collection with filter",
+ "\t\tcloning collection",
"ns"_attr = nss,
- "conn_getServerAddress"_attr = conn->getServerAddress(),
- "query"_attr = redact(query.toString()));
+ "conn_getServerAddress"_attr = conn->getServerAddress());
Fun f(opCtx, toDBName);
f.numSeen = 0;
@@ -254,7 +253,8 @@ void Cloner::_copy(OperationContext* opCtx,
Lock::TempRelease tempRelease(opCtx->lockState());
conn->query(std::function<void(DBClientCursorBatchIterator&)>(f),
nss,
- query,
+ BSONObj{} /* filter */,
+ Query() /* querySettings */,
nullptr,
options,
0 /* batchSize */,
@@ -262,8 +262,7 @@ void Cloner::_copy(OperationContext* opCtx,
}
uassert(ErrorCodes::PrimarySteppedDown,
- str::stream() << "Not primary while cloning collection " << nss.ns() << " with filter "
- << query.toString(),
+ str::stream() << "Not primary while cloning collection " << nss.ns(),
!opCtx->writesAreReplicated() ||
repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss));
}
@@ -582,7 +581,6 @@ Status Cloner::copyDb(OperationContext* opCtx,
nss,
params.collectionInfo["options"].Obj(),
params.idIndexSpec,
- Query(),
conn.get());
}
diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h
index 0760355a3ff..8d1d512fe1f 100644
--- a/src/mongo/db/cloner.h
+++ b/src/mongo/db/cloner.h
@@ -95,7 +95,6 @@ private:
const NamespaceString& nss,
const BSONObj& from_opts,
const BSONObj& from_id_index,
- Query q,
DBClientBase* conn);
void _copyIndexes(OperationContext* opCtx,
diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp
index e7a6b76d2cf..67a1549b4fc 100644
--- a/src/mongo/db/commands/dbcommands_d.cpp
+++ b/src/mongo/db/commands/dbcommands_d.cpp
@@ -410,9 +410,8 @@ public:
const BSONObj& query,
const BSONObj& sort) {
DBDirectClient client(opCtx);
- Query q(query);
- q.sort(sort);
- std::unique_ptr<DBClientCursor> c = client.query(NamespaceString(ns), q);
+ std::unique_ptr<DBClientCursor> c =
+ client.query(NamespaceString(ns), query, Query().sort(sort));
while (c->more()) {
LOGV2(20454, "Chunk: {chunk}", "Dumping chunks", "chunk"_attr = c->nextSafe());
}
diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp
index 2f6d761854e..d4da340dc5b 100644
--- a/src/mongo/db/commands/user_management_commands.cpp
+++ b/src/mongo/db/commands/user_management_commands.cpp
@@ -220,7 +220,7 @@ Status queryAuthzDocument(OperationContext* opCtx,
const std::function<void(const BSONObj&)>& resultProcessor) {
try {
DBDirectClient client(opCtx);
- client.query(resultProcessor, collectionName, query, &projection);
+ client.query(resultProcessor, collectionName, query, Query(), &projection);
return Status::OK();
} catch (const DBException& e) {
return e.toStatus();
diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp
index f2e0b3bd629..33edc482347 100644
--- a/src/mongo/db/dbdirectclient.cpp
+++ b/src/mongo/db/dbdirectclient.cpp
@@ -153,7 +153,8 @@ void DBDirectClient::say(Message& toSend, bool isRetry, string* actualServer) {
}
unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
+ const BSONObj& filter,
+ const Query& querySettings,
int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
@@ -162,7 +163,7 @@ unique_ptr<DBClientCursor> DBDirectClient::query(const NamespaceStringOrUUID& ns
boost::optional<BSONObj> readConcernObj) {
invariant(!readConcernObj, "passing readConcern to DBDirectClient functions is not supported");
return DBClientBase::query(
- nsOrUuid, query, limit, nToSkip, fieldsToReturn, queryOptions, batchSize);
+ nsOrUuid, filter, querySettings, limit, nToSkip, fieldsToReturn, queryOptions, batchSize);
}
write_ops::FindAndModifyCommandReply DBDirectClient::findAndModify(
diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h
index 74a09f8cef3..94eff680c4a 100644
--- a/src/mongo/db/dbdirectclient.h
+++ b/src/mongo/db/dbdirectclient.h
@@ -56,7 +56,8 @@ public:
std::unique_ptr<DBClientCursor> query(
const NamespaceStringOrUUID& nsOrUuid,
- Query query,
+ const BSONObj& filter,
+ const Query& querySettings = Query(),
int limit = 0,
int nToSkip = 0,
const BSONObj* fieldsToReturn = nullptr,
diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
index e8a167ccdcc..897cdc62e2b 100644
--- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
+++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp
@@ -142,38 +142,41 @@ auto startExhaustQuery(
Milliseconds awaitDataTimeoutMS = Milliseconds(5000),
const boost::optional<repl::OpTime>& lastKnownCommittedOpTime = boost::none) {
queryOptions = queryOptions | QueryOption_Exhaust;
- auto queryThread = stdx::async(
- stdx::launch::async,
- [&queryCursor,
- queryConnection,
- queryOptions,
- awaitDataTimeoutMS,
- lastKnownCommittedOpTime] {
- const auto projSpec = BSON("_id" << 0 << "a" << 1);
- // Issue the initial 'find' with a batchSize of 2 and the exhaust flag set. We then
- // iterate through the first batch and confirm that the results are as expected.
- queryCursor = queryConnection->query(testNSS, {}, 0, 0, &projSpec, queryOptions, 2);
- for (int i = 0; i < 2; ++i) {
- ASSERT_BSONOBJ_EQ(queryCursor->nextSafe(), BSON("a" << i));
- }
- // Having exhausted the two results returned by the initial find, we set the batchSize
- // to 1 and issue a single getMore via DBClientCursor::more(). Because the 'exhaust'
- // flag is set, the server will generate a series of internal getMores and stream them
- // back to the client until the cursor is exhausted, without the client sending any
- // further getMore requests. We expect this request to hang at the
- // 'waitWithPinnedCursorDuringGetMoreBatch' failpoint.
- queryCursor->setBatchSize(1);
- if ((queryOptions & QueryOption_CursorTailable) &&
- (queryOptions & QueryOption_AwaitData)) {
- queryCursor->setAwaitDataTimeoutMS(awaitDataTimeoutMS);
- if (lastKnownCommittedOpTime) {
- auto term = lastKnownCommittedOpTime.get().getTerm();
- queryCursor->setCurrentTermAndLastCommittedOpTime(term,
- lastKnownCommittedOpTime);
- }
- }
- ASSERT(queryCursor->more());
- });
+ auto queryThread =
+ stdx::async(stdx::launch::async,
+ [&queryCursor,
+ queryConnection,
+ queryOptions,
+ awaitDataTimeoutMS,
+ lastKnownCommittedOpTime] {
+ const auto projSpec = BSON("_id" << 0 << "a" << 1);
+ // Issue the initial 'find' with a batchSize of 2 and the exhaust flag set.
+ // We then iterate through the first batch and confirm that the results are
+ // as expected.
+ queryCursor = queryConnection->query(
+ testNSS, BSONObj{}, Query(), 0, 0, &projSpec, queryOptions, 2);
+ for (int i = 0; i < 2; ++i) {
+ ASSERT_BSONOBJ_EQ(queryCursor->nextSafe(), BSON("a" << i));
+ }
+ // Having exhausted the two results returned by the initial find, we set the
+ // batchSize to 1 and issue a single getMore via DBClientCursor::more().
+ // Because the 'exhaust' flag is set, the server will generate a series of
+ // internal getMores and stream them back to the client until the cursor is
+ // exhausted, without the client sending any further getMore requests. We
+ // expect this request to hang at the
+ // 'waitWithPinnedCursorDuringGetMoreBatch' failpoint.
+ queryCursor->setBatchSize(1);
+ if ((queryOptions & QueryOption_CursorTailable) &&
+ (queryOptions & QueryOption_AwaitData)) {
+ queryCursor->setAwaitDataTimeoutMS(awaitDataTimeoutMS);
+ if (lastKnownCommittedOpTime) {
+ auto term = lastKnownCommittedOpTime.get().getTerm();
+ queryCursor->setCurrentTermAndLastCommittedOpTime(
+ term, lastKnownCommittedOpTime);
+ }
+ }
+ ASSERT(queryCursor->more());
+ });
// Wait until the parallel operation initializes its cursor.
const auto startTime = clock->now();
diff --git a/src/mongo/db/op_observer_impl_test.cpp b/src/mongo/db/op_observer_impl_test.cpp
index c197c936cc5..4bbe23b0a66 100644
--- a/src/mongo/db/op_observer_impl_test.cpp
+++ b/src/mongo/db/op_observer_impl_test.cpp
@@ -785,7 +785,7 @@ protected:
boost::optional<DurableTxnStateEnum> txnState) {
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << session()->getSessionId().toBSON())});
+ BSON("_id" << session()->getSessionId().toBSON()));
ASSERT(cursor);
ASSERT(cursor->more());
@@ -811,7 +811,7 @@ protected:
void assertNoTxnRecord() {
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << session()->getSessionId().toBSON())});
+ BSON("_id" << session()->getSessionId().toBSON()));
ASSERT(cursor);
ASSERT(!cursor->more());
}
@@ -819,7 +819,7 @@ protected:
void assertTxnRecordStartOpTime(boost::optional<repl::OpTime> startOpTime) {
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << session()->getSessionId().toBSON())});
+ BSON("_id" << session()->getSessionId().toBSON()));
ASSERT(cursor);
ASSERT(cursor->more());
diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp
index a09d98495ee..65fffa768d9 100644
--- a/src/mongo/db/ops/write_ops_retryability.cpp
+++ b/src/mongo/db/ops/write_ops_retryability.cpp
@@ -118,8 +118,8 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
LogicalSessionId sessionId = oplog.getSessionId().get();
TxnNumber txnNumber = oplog.getTxnNumber().get();
Timestamp ts = oplog.getTimestamp();
- const auto query = BSON("_id" << sessionId.toBSON());
- BSONObj imageDoc = client.findOne(NamespaceString::kConfigImagesNamespace.ns(), query);
+ BSONObj imageDoc = client.findOne(NamespaceString::kConfigImagesNamespace.ns(),
+ BSON("_id" << sessionId.toBSON()) /*filter*/);
if (imageDoc.isEmpty()) {
LOGV2_WARNING(5676402,
"Image lookup for a retryable findAndModify was not found",
@@ -170,9 +170,8 @@ BSONObj extractPreOrPostImage(OperationContext* opCtx, const repl::OplogEntry& o
auto opTime = oplog.getPreImageOpTime() ? oplog.getPreImageOpTime().value()
: oplog.getPostImageOpTime().value();
-
auto oplogDoc =
- client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), nullptr);
+ client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), Query(), nullptr);
uassert(40613,
str::stream() << "oplog no longer contains the complete write history of this "
diff --git a/src/mongo/db/persistent_task_store.h b/src/mongo/db/persistent_task_store.h
index cef18c359c4..d5f3d7a2c16 100644
--- a/src/mongo/db/persistent_task_store.h
+++ b/src/mongo/db/persistent_task_store.h
@@ -83,10 +83,10 @@ public:
* multiple documents match, at most one document will be updated.
*/
void update(OperationContext* opCtx,
- Query query,
+ const BSONObj& filter,
const BSONObj& update,
const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) {
- _update(opCtx, std::move(query), update, /* upsert */ false, writeConcern);
+ _update(opCtx, filter, update, /* upsert */ false, writeConcern);
}
/**
@@ -94,17 +94,17 @@ public:
* multiple documents match, at most one document will be updated.
*/
void upsert(OperationContext* opCtx,
- Query query,
+ const BSONObj& filter,
const BSONObj& update,
const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) {
- _update(opCtx, std::move(query), update, /* upsert */ true, writeConcern);
+ _update(opCtx, filter, update, /* upsert */ true, writeConcern);
}
/**
* Removes all documents which match the given query.
*/
void remove(OperationContext* opCtx,
- Query query,
+ const BSONObj& filter,
const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) {
DBDirectClient dbClient(opCtx);
@@ -114,7 +114,7 @@ public:
deleteOp.setDeletes({[&] {
write_ops::DeleteOpEntry entry;
- entry.setQ(query.obj);
+ entry.setQ(filter);
entry.setMulti(true);
return entry;
@@ -136,10 +136,12 @@ public:
* Iteration can be stopped early if the callback returns false indicating that it doesn't want
* to continue.
*/
- void forEach(OperationContext* opCtx, Query query, std::function<bool(const T&)> handler) {
+ void forEach(OperationContext* opCtx,
+ const BSONObj& filter,
+ std::function<bool(const T&)> handler) {
DBDirectClient dbClient(opCtx);
- auto cursor = dbClient.query(_storageNss, query);
+ auto cursor = dbClient.query(_storageNss, filter);
while (cursor->more()) {
auto bson = cursor->next();
@@ -154,18 +156,18 @@ public:
/**
* Returns the number of documents in the store matching the given query.
*/
- size_t count(OperationContext* opCtx, Query query = Query()) {
+ size_t count(OperationContext* opCtx, const BSONObj& filter = BSONObj{}) {
DBDirectClient client(opCtx);
auto projection = BSON("_id" << 1);
- auto cursor = client.query(_storageNss, query, 0, 0, &projection);
+ auto cursor = client.query(_storageNss, filter, Query(), 0, 0, &projection);
return cursor->itcount();
}
private:
void _update(OperationContext* opCtx,
- Query query,
+ const BSONObj& filter,
const BSONObj& update,
bool upsert,
const WriteConcernOptions& writeConcern = WriteConcerns::kMajorityWriteConcern) {
@@ -174,7 +176,7 @@ private:
auto commandResponse = dbClient.runCommand([&] {
write_ops::UpdateCommandRequest updateOp(_storageNss);
auto updateModification = write_ops::UpdateModification::parseFromClassicUpdate(update);
- write_ops::UpdateOpEntry updateEntry(query.obj, updateModification);
+ write_ops::UpdateOpEntry updateEntry(filter, updateModification);
updateEntry.setMulti(false);
updateEntry.setUpsert(upsert);
updateOp.setUpdates({updateEntry});
@@ -187,7 +189,7 @@ private:
uassert(ErrorCodes::NoMatchingDocument,
"No matching document found for query {} on namespace {}"_format(
- query.toString(), _storageNss.toString()),
+ filter.toString(), _storageNss.toString()),
upsert || commandReply.getIntField("n") > 0);
WriteConcernResult ignoreResult;
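Taken together, the store's surface now accepts plain BSON filters end to end. A condensed sketch of post-change usage, assuming the TestTask/kNss fixtures from the test file below:

    PersistentTaskStore<TestTask> store(kNss);
    store.add(opCtx, TestTask{"one", 0, 10});
    // QUERY(...) wrappers are gone; filters are ordinary BSONObj values.
    store.update(opCtx, BSON("key" << "one"), BSON("$inc" << BSON("min" << 1)));
    store.forEach(opCtx, BSON("min" << GTE << 10), [](const TestTask& t) { return true; });
    ASSERT_EQ(store.count(opCtx, BSON("key" << "one")), 1);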
diff --git a/src/mongo/db/persistent_task_store_test.cpp b/src/mongo/db/persistent_task_store_test.cpp
index a8167ac9dbb..dd995a3eda0 100644
--- a/src/mongo/db/persistent_task_store_test.cpp
+++ b/src/mongo/db/persistent_task_store_test.cpp
@@ -105,8 +105,8 @@ TEST_F(PersistentTaskStoreTest, TestForEach) {
// No match.
int count = 0;
store.forEach(opCtx,
- QUERY("key"
- << "four"),
+ BSON("key"
+ << "four"),
[&count](const TestTask& t) {
++count;
return true;
@@ -115,7 +115,7 @@ TEST_F(PersistentTaskStoreTest, TestForEach) {
// Multiple matches.
count = 0;
- store.forEach(opCtx, QUERY("min" << GTE << 10), [&count](const TestTask& t) {
+ store.forEach(opCtx, BSON("min" << GTE << 10), [&count](const TestTask& t) {
++count;
return true;
});
@@ -123,7 +123,7 @@ TEST_F(PersistentTaskStoreTest, TestForEach) {
// Multiple matches, only take one.
count = 0;
- store.forEach(opCtx, QUERY("min" << GTE << 10), [&count](const TestTask& t) {
+ store.forEach(opCtx, BSON("min" << GTE << 10), [&count](const TestTask& t) {
++count;
return count < 1;
});
@@ -132,8 +132,8 @@ TEST_F(PersistentTaskStoreTest, TestForEach) {
// Single match.
count = 0;
store.forEach(opCtx,
- QUERY("key"
- << "one"),
+ BSON("key"
+ << "one"),
[&count](const TestTask& t) {
++count;
return true;
@@ -153,8 +153,8 @@ TEST_F(PersistentTaskStoreTest, TestRemove) {
ASSERT_EQ(store.count(opCtx), 3);
store.remove(opCtx,
- QUERY("key"
- << "one"));
+ BSON("key"
+ << "one"));
ASSERT_EQ(store.count(opCtx), 2);
}
@@ -171,7 +171,7 @@ TEST_F(PersistentTaskStoreTest, TestRemoveMultiple) {
ASSERT_EQ(store.count(opCtx), 3);
// Remove multiple overlapping ranges.
- store.remove(opCtx, QUERY("min" << GTE << 10));
+ store.remove(opCtx, BSON("min" << GTE << 10));
ASSERT_EQ(store.count(opCtx), 1);
}
@@ -189,13 +189,13 @@ TEST_F(PersistentTaskStoreTest, TestUpdate) {
ASSERT_EQ(store.count(opCtx), 3);
store.update(opCtx,
- QUERY("key"
- << "one"),
+ BSON("key"
+ << "one"),
BSON("$inc" << BSON("min" << 1)));
store.forEach(opCtx,
- QUERY("key"
- << "one"),
+ BSON("key"
+ << "one"),
[&](const TestTask& task) {
ASSERT_EQ(task.min, expectedUpdatedMin);
return false;
@@ -214,9 +214,9 @@ TEST_F(PersistentTaskStoreTest, TestUpdateOnlyUpdatesOneMatchingDocument) {
store.add(opCtx, TestTask{"three", 40, 50});
// Update query will match two documents but should only update one of them.
- store.update(opCtx, QUERY("key" << keyToMatch), BSON("$inc" << BSON("min" << 1)));
+ store.update(opCtx, BSON("key" << keyToMatch), BSON("$inc" << BSON("min" << 1)));
- ASSERT_EQ(store.count(opCtx, QUERY("key" << keyToMatch << "min" << expectedUpdatedMin)), 1);
+ ASSERT_EQ(store.count(opCtx, BSON("key" << keyToMatch << "min" << expectedUpdatedMin)), 1);
}
TEST_F(PersistentTaskStoreTest, TestUpsert) {
@@ -225,7 +225,7 @@ TEST_F(PersistentTaskStoreTest, TestUpsert) {
PersistentTaskStore<TestTask> store(kNss);
std::string keyToMatch = "foo";
- auto query = QUERY("key" << keyToMatch);
+ auto query = BSON("key" << keyToMatch);
TestTask task(keyToMatch, 0, 0);
BSONObj taskBson = task.toBSON();
@@ -281,15 +281,15 @@ TEST_F(PersistentTaskStoreTest, TestWritesPersistAcrossInstances) {
PersistentTaskStore<TestTask> store(kNss);
ASSERT_EQ(store.count(opCtx), 3);
- auto count = store.count(opCtx, QUERY("min" << GTE << 10));
+ auto count = store.count(opCtx, BSON("min" << GTE << 10));
ASSERT_EQ(count, 2);
store.remove(opCtx,
- QUERY("key"
- << "two"));
+ BSON("key"
+ << "two"));
ASSERT_EQ(store.count(opCtx), 2);
- count = store.count(opCtx, QUERY("min" << GTE << 10));
+ count = store.count(opCtx, BSON("min" << GTE << 10));
ASSERT_EQ(count, 1);
}
@@ -297,7 +297,7 @@ TEST_F(PersistentTaskStoreTest, TestWritesPersistAcrossInstances) {
PersistentTaskStore<TestTask> store(kNss);
ASSERT_EQ(store.count(opCtx), 2);
- auto count = store.count(opCtx, QUERY("min" << GTE << 10));
+ auto count = store.count(opCtx, BSON("min" << GTE << 10));
ASSERT_EQ(count, 1);
}
}
@@ -312,16 +312,16 @@ TEST_F(PersistentTaskStoreTest, TestCountWithQuery) {
store.add(opCtx, TestTask{"two", 40, 50});
ASSERT_EQ(store.count(opCtx,
- QUERY("key"
- << "two")),
+ BSON("key"
+ << "two")),
2);
// Remove multiple overlapping ranges.
- store.remove(opCtx, QUERY("min" << 10));
+ store.remove(opCtx, BSON("min" << 10));
ASSERT_EQ(store.count(opCtx,
- QUERY("key"
- << "two")),
+ BSON("key"
+ << "two")),
1);
}
diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp
index 10320583a71..90d6e386d56 100644
--- a/src/mongo/db/query/query_request_helper.cpp
+++ b/src/mongo/db/query/query_request_helper.cpp
@@ -36,6 +36,7 @@
#include "mongo/base/status.h"
#include "mongo/base/status_with.h"
#include "mongo/bson/simple_bsonobj_comparator.h"
+#include "mongo/client/query.h"
#include "mongo/db/commands/test_commands_enabled.h"
#include "mongo/db/dbmessage.h"
@@ -139,10 +140,7 @@ Status initFullQuery(const BSONObj& top, FindCommandRequest* findCommand) {
}
} else if (name.startsWith("$")) {
name = name.substr(1); // chop first char
- if (name == "explain") {
- return Status(ErrorCodes::Error(5856600),
- "the $explain OP_QUERY flag is no longer supported");
- } else if (name == "min") {
+ if (name == "min") {
if (!e.isABSONObj()) {
return Status(ErrorCodes::BadValue, "$min must be a BSONObj");
}
@@ -187,7 +185,8 @@ Status initFullQuery(const BSONObj& top, FindCommandRequest* findCommand) {
Status initFindCommandRequest(int ntoskip,
int queryOptions,
- const BSONObj& queryObj,
+ const BSONObj& filter,
+ const Query& querySettings,
const BSONObj& proj,
FindCommandRequest* findCommand) {
if (!proj.isEmpty()) {
@@ -200,19 +199,12 @@ Status initFindCommandRequest(int ntoskip,
// Initialize flags passed as 'queryOptions' bit vector.
initFromInt(queryOptions, findCommand);
- BSONElement queryField = queryObj["query"];
- if (!queryField.isABSONObj()) {
- queryField = queryObj["$query"];
- }
- if (queryField.isABSONObj()) {
- findCommand->setFilter(queryField.embeddedObject().getOwned());
- Status status = initFullQuery(queryObj, findCommand);
- if (!status.isOK()) {
- return status;
- }
- } else {
- findCommand->setFilter(queryObj.getOwned());
+ findCommand->setFilter(filter.getOwned());
+ Status status = initFullQuery(querySettings.getFullSettingsDeprecated(), findCommand);
+ if (!status.isOK()) {
+ return status;
}
+
// It's not possible to specify readConcern in a legacy query message, so initialize it to
// an empty readConcern object, ie. equivalent to `readConcern: {}`. This ensures that
// mongos passes this empty readConcern to shards.
@@ -393,14 +385,15 @@ void validateCursorResponse(const BSONObj& outputAsBson) {
//
StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nssOrUuid,
- const BSONObj& queryObj,
+ const BSONObj& filter,
+ const Query& querySettings,
const BSONObj& proj,
int ntoskip,
int queryOptions) {
auto findCommand = std::make_unique<FindCommandRequest>(std::move(nssOrUuid));
- Status status =
- initFindCommandRequest(ntoskip, queryOptions, queryObj, proj, findCommand.get());
+ Status status = initFindCommandRequest(
+ ntoskip, queryOptions, filter, querySettings, proj, findCommand.get());
if (!status.isOK()) {
return status;
}
diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h
index c925c06dd38..3c7cbc53b89 100644
--- a/src/mongo/db/query/query_request_helper.h
+++ b/src/mongo/db/query/query_request_helper.h
@@ -42,6 +42,7 @@ namespace mongo {
class QueryMessage;
class Status;
+class Query;
template <typename T>
class StatusWith;
@@ -151,7 +152,8 @@ void validateCursorResponse(const BSONObj& outputAsBson);
* Parse the provided legacy query object and parameters to construct a FindCommandRequest.
*/
StatusWith<std::unique_ptr<FindCommandRequest>> fromLegacyQuery(NamespaceStringOrUUID nsOrUuid,
- const BSONObj& queryObj,
+ const BSONObj& filter,
+ const Query& querySettings,
const BSONObj& proj,
int ntoskip,
int queryOptions);
diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp
index 6eeb8704f26..eba29f8c27e 100644
--- a/src/mongo/db/query/query_request_test.cpp
+++ b/src/mongo/db/query/query_request_test.cpp
@@ -1551,24 +1551,20 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) {
TEST(QueryRequestTest, ParseFromLegacyQuery) {
const auto kSkip = 1;
const NamespaceString nss("test.testns");
- BSONObj queryObj = fromjson(R"({
- query: {query: 1},
- orderby: {sort: 1},
- $hint: {hint: 1},
- $min: {x: 'min'},
- $max: {x: 'max'}
- })");
unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
- nss, queryObj, BSON("proj" << 1), kSkip, QueryOption_Exhaust)));
+ nss,
+ fromjson("{query: 1}") /*filter*/,
+ Query().sort(BSON("sort" << 1)).hint(BSON("hint" << 1)),
+ BSON("proj" << 1),
+ kSkip,
+ QueryOption_Exhaust)));
ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss);
ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{query: 1}"));
ASSERT_BSONOBJ_EQ(findCommand->getProjection(), fromjson("{proj: 1}"));
ASSERT_BSONOBJ_EQ(findCommand->getSort(), fromjson("{sort: 1}"));
ASSERT_BSONOBJ_EQ(findCommand->getHint(), fromjson("{hint: 1}"));
- ASSERT_BSONOBJ_EQ(findCommand->getMin(), fromjson("{x: 'min'}"));
- ASSERT_BSONOBJ_EQ(findCommand->getMax(), fromjson("{x: 'max'}"));
ASSERT_EQ(findCommand->getSkip(), boost::optional<int64_t>(kSkip));
ASSERT_FALSE(findCommand->getNtoreturn());
ASSERT_EQ(findCommand->getSingleBatch(), false);
@@ -1579,15 +1575,14 @@ TEST(QueryRequestTest, ParseFromLegacyQuery) {
TEST(QueryRequestTest, ParseFromLegacyQueryOplogReplayFlagAllowed) {
const NamespaceString nss("test.testns");
- auto queryObj = fromjson("{query: {query: 1}, orderby: {sort: 1}}");
const BSONObj projectionObj{};
const auto nToSkip = 0;
// Test that parsing succeeds even if the oplog replay bit is set in the OP_QUERY message. This
// flag may be set by old clients.
auto options = QueryOption_OplogReplay_DEPRECATED;
- unique_ptr<FindCommandRequest> findCommand(assertGet(
- query_request_helper::fromLegacyQuery(nss, queryObj, projectionObj, nToSkip, options)));
+ unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
+ nss, fromjson("{query: 1}"), Query().sort("sort", 1), projectionObj, nToSkip, options)));
// Verify that if we reserialize the find command, the 'oplogReplay' field
// does not appear.
@@ -1603,12 +1598,9 @@ TEST(QueryRequestTest, ParseFromLegacyQueryOplogReplayFlagAllowed) {
}
TEST(QueryRequestTest, ParseFromLegacyQueryUnwrapped) {
- BSONObj queryObj = fromjson(R"({
- foo: 1
- })");
const NamespaceString nss("test.testns");
- unique_ptr<FindCommandRequest> findCommand(assertGet(
- query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, QueryOption_Exhaust)));
+ unique_ptr<FindCommandRequest> findCommand(assertGet(query_request_helper::fromLegacyQuery(
+ nss, fromjson("{foo: 1}"), Query(), BSONObj(), 0, QueryOption_Exhaust)));
ASSERT_EQ(*findCommand->getNamespaceOrUUID().nss(), nss);
ASSERT_BSONOBJ_EQ(findCommand->getFilter(), fromjson("{foo: 1}"));
@@ -1628,20 +1620,6 @@ TEST(QueryRequestHelperTest, ValidateResponseWrongDataType) {
ErrorCodes::TypeMismatch);
}
-TEST(QueryRequestTest, ParseFromLegacyQueryExplainError) {
- BSONObj queryObj = fromjson(R"({
- query: {query: 1},
- $explain: false
- })");
-
- const NamespaceString nss("test.testns");
- ASSERT_EQUALS(
- query_request_helper::fromLegacyQuery(nss, queryObj, BSONObj(), 0, QueryOption_Exhaust)
- .getStatus()
- .code(),
- static_cast<ErrorCodes::Error>(5856600));
-}
-
class QueryRequestTest : public ServiceContextTest {};
TEST_F(QueryRequestTest, ParseFromUUID) {
diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
index 72ed4b8e9b7..1b341ab4b11 100644
--- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
+++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp
@@ -50,7 +50,7 @@ BSONObj getPersistedDefaultRWConcernDocument(OperationContext* opCtx) {
DBDirectClient client(opCtx);
return client.findOne(NamespaceString::kConfigSettingsNamespace.toString(),
- QUERY("_id" << ReadWriteConcernDefaults::kPersistedDocumentId));
+ BSON("_id" << ReadWriteConcernDefaults::kPersistedDocumentId));
}
} // namespace
diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp
index 4987ef3e937..8a7159b5dde 100644
--- a/src/mongo/db/repl/all_database_cloner.cpp
+++ b/src/mongo/db/repl/all_database_cloner.cpp
@@ -132,7 +132,7 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::getInitialSyncIdStage() {
if (wireVersion < WireVersion::RESUMABLE_INITIAL_SYNC)
return kContinueNormally;
auto initialSyncId = getClient()->findOne(
- ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), Query());
+ ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), BSONObj{});
uassert(ErrorCodes::InitialSyncFailure,
"Cannot retrieve sync source initial sync ID",
!initialSyncId.isEmpty());
diff --git a/src/mongo/db/repl/all_database_cloner_test.cpp b/src/mongo/db/repl/all_database_cloner_test.cpp
index 3204c419a12..b77caa19e47 100644
--- a/src/mongo/db/repl/all_database_cloner_test.cpp
+++ b/src/mongo/db/repl/all_database_cloner_test.cpp
@@ -384,7 +384,8 @@ TEST_F(AllDatabaseClonerTest, RetriesListDatabasesButInitialSyncIdChanges) {
// Clear and change the initial sync ID
_mockServer->remove(
- ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), Query());
+ ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(),
+ BSONObj{} /*filter*/);
_mockServer->insert(
ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(),
BSON("_id" << UUID::gen()));
diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp
index e8a89e190ce..41b8c8fd858 100644
--- a/src/mongo/db/repl/apply_ops.cpp
+++ b/src/mongo/db/repl/apply_ops.cpp
@@ -314,7 +314,9 @@ Status _checkPrecondition(OperationContext* opCtx,
}
DBDirectClient db(opCtx);
- BSONObj realres = db.findOne(nss.ns(), preCondition["q"].Obj());
+ // The preconditions come in "q: {{query: {...}, orderby: ..., etc.}}" format.
+ auto preconditionQuery = Query::fromBSONDeprecated(preCondition["q"].Obj());
+ BSONObj realres = db.findOne(nss.ns(), preconditionQuery.getFilter(), preconditionQuery);
// Get collection default collation.
auto databaseHolder = DatabaseHolder::get(opCtx);
diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp
index 35b1ddf6d7b..191f1331101 100644
--- a/src/mongo/db/repl/collection_cloner.cpp
+++ b/src/mongo/db/repl/collection_cloner.cpp
@@ -308,12 +308,11 @@ void CollectionCloner::runQuery() {
if (_resumeToken) {
// Resume the query from where we left off.
LOGV2_DEBUG(21133, 1, "Collection cloner will resume the last successful query");
- query = QUERY("query" << BSONObj() << "$_requestResumeToken" << true << "$_resumeAfter"
- << _resumeToken.get());
+ query.requestResumeToken(true).resumeAfter(_resumeToken.get());
} else {
// New attempt at a resumable query.
LOGV2_DEBUG(21134, 1, "Collection cloner will run a new query");
- query = QUERY("query" << BSONObj() << "$_requestResumeToken" << true);
+ query.requestResumeToken(true);
}
query.hint(BSON("$natural" << 1));
}
@@ -326,6 +325,7 @@ void CollectionCloner::runQuery() {
try {
getClient()->query([this](DBClientCursorBatchIterator& iter) { handleNextBatch(iter); },
_sourceDbAndUuid,
+ BSONObj{},
query,
nullptr /* fieldsToReturn */,
QueryOption_NoCursorTimeout | QueryOption_SecondaryOk |
diff --git a/src/mongo/db/repl/initial_sync_base_cloner.cpp b/src/mongo/db/repl/initial_sync_base_cloner.cpp
index 314ef926557..0c00012b3f7 100644
--- a/src/mongo/db/repl/initial_sync_base_cloner.cpp
+++ b/src/mongo/db/repl/initial_sync_base_cloner.cpp
@@ -128,7 +128,8 @@ Status InitialSyncBaseCloner::checkInitialSyncIdIsUnchanged() {
BSONObj initialSyncId;
try {
initialSyncId = getClient()->findOne(
- ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(), Query());
+ ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace.toString(),
+ BSONObj{});
} catch (DBException& e) {
if (ErrorCodes::isRetriableError(e)) {
auto status = e.toStatus().withContext(
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index 4d50cb7e241..d3f0653a744 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1967,8 +1967,11 @@ void setNewTimestamp(ServiceContext* service, const Timestamp& newTime) {
void initTimestampFromOplog(OperationContext* opCtx, const NamespaceString& oplogNss) {
DBDirectClient c(opCtx);
static const BSONObj reverseNaturalObj = BSON("$natural" << -1);
- BSONObj lastOp =
- c.findOne(oplogNss.ns(), Query().sort(reverseNaturalObj), nullptr, QueryOption_SecondaryOk);
+ BSONObj lastOp = c.findOne(oplogNss.ns(),
+ BSONObj{},
+ Query().sort(reverseNaturalObj),
+ nullptr,
+ QueryOption_SecondaryOk);
if (!lastOp.isEmpty()) {
LOGV2_DEBUG(21256, 1, "replSet setting last Timestamp");
diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
index cda89640fdf..9a2bc34390f 100644
--- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
+++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp
@@ -350,7 +350,7 @@ void checkTxnTable(OperationContext* opCtx,
boost::optional<DurableTxnStateEnum> expectedState) {
DBDirectClient client(opCtx);
auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
- {BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON())});
+ BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()));
ASSERT_FALSE(result.isEmpty());
auto txnRecord =
@@ -393,7 +393,7 @@ StatusWith<BSONObj> CollectionReader::next() {
bool docExists(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) {
DBDirectClient client(opCtx);
- auto result = client.findOne(nss.ns(), {doc});
+ auto result = client.findOne(nss.ns(), doc);
return !result.isEmpty();
}
diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp
index 08b5039f404..b7961ef871d 100644
--- a/src/mongo/db/repl/oplog_fetcher.cpp
+++ b/src/mongo/db/repl/oplog_fetcher.cpp
@@ -264,8 +264,12 @@ OpTime OplogFetcher::getLastOpTimeFetched_forTest() const {
return _getLastOpTimeFetched();
}
-BSONObj OplogFetcher::getFindQuery_forTest(long long findTimeout) const {
- return _makeFindQuery(findTimeout);
+BSONObj OplogFetcher::getFindQueryFilter_forTest() const {
+ return _makeFindQueryFilter();
+}
+
+Query OplogFetcher::getFindQuerySettings_forTest(long long findTimeout) const {
+ return _makeFindQuerySettings(findTimeout);
}
Milliseconds OplogFetcher::getAwaitDataTimeout_forTest() const {
@@ -572,11 +576,11 @@ AggregateCommandRequest OplogFetcher::_makeAggregateCommandRequest(long long max
return aggRequest;
}
-BSONObj OplogFetcher::_makeFindQuery(long long findTimeout) const {
+BSONObj OplogFetcher::_makeFindQueryFilter() const {
BSONObjBuilder queryBob;
auto lastOpTimeFetched = _getLastOpTimeFetched();
- BSONObjBuilder filterBob(queryBob.subobjStart("query"));
+ BSONObjBuilder filterBob;
filterBob.append("ts", BSON("$gte" << lastOpTimeFetched.getTimestamp()));
// Handle caller-provided filter.
if (!_config.queryFilter.isEmpty()) {
@@ -584,34 +588,34 @@ BSONObj OplogFetcher::_makeFindQuery(long long findTimeout) const {
"$or",
BSON_ARRAY(_config.queryFilter << BSON("ts" << lastOpTimeFetched.getTimestamp())));
}
- filterBob.done();
+ return filterBob.obj();
+}
- queryBob.append("$maxTimeMS", findTimeout);
+Query OplogFetcher::_makeFindQuerySettings(long long findTimeout) const {
+ Query query = Query().maxTimeMS(findTimeout);
if (_config.requestResumeToken) {
- queryBob.append("$hint", BSON("$natural" << 1));
- queryBob.append("$_requestResumeToken", true);
+ query.hint(BSON("$natural" << 1)).requestResumeToken(true);
}
auto lastCommittedWithCurrentTerm =
_dataReplicatorExternalState->getCurrentTermAndLastCommittedOpTime();
auto term = lastCommittedWithCurrentTerm.value;
if (term != OpTime::kUninitializedTerm) {
- queryBob.append("term", term);
+ query.term(term);
}
if (_config.queryReadConcern.isEmpty()) {
// This ensures that the sync source waits for all earlier oplog writes to be visible.
// Since Timestamp(0, 0) isn't allowed, Timestamp(0, 1) is the minimal we can use.
- queryBob.append("readConcern",
- BSON("level"
- << "local"
- << "afterClusterTime" << Timestamp(0, 1)));
+ query.readConcern(BSON("level"
+ << "local"
+ << "afterClusterTime" << Timestamp(0, 1)));
} else {
// Caller-provided read concern.
- queryBob.appendElements(_config.queryReadConcern.toBSON());
+ query.appendElements(_config.queryReadConcern.toBSON());
}
- return queryBob.obj();
+ return query;
}
Status OplogFetcher::_createNewCursor(bool initialFind) {
@@ -642,7 +646,8 @@ Status OplogFetcher::_createNewCursor(bool initialFind) {
_cursor = std::make_unique<DBClientCursor>(
_conn.get(),
_nss,
- _makeFindQuery(maxTimeMs),
+ _makeFindQueryFilter(),
+ _makeFindQuerySettings(maxTimeMs),
0 /* nToReturn */,
0 /* nToSkip */,
nullptr /* fieldsToReturn */,
@@ -1039,7 +1044,7 @@ Status OplogFetcher::_checkTooStaleToSyncFromSource(const OpTime lastFetched,
auto query = Query().sort(BSON("$natural" << 1));
// Since this function is called after the first batch, the exhaust stream has not been
// started yet. As a result, using the same connection is safe.
- remoteFirstOplogEntry = _conn->findOne(_nss.ns(), query);
+ remoteFirstOplogEntry = _conn->findOne(_nss.ns(), BSONObj{}, query);
} catch (DBException& e) {
// If an error occurs with the query, throw an error.
return Status(ErrorCodes::TooStaleToSyncFromSource, e.reason());
diff --git a/src/mongo/db/repl/oplog_fetcher.h b/src/mongo/db/repl/oplog_fetcher.h
index 6a67613e7e4..8c854024ceb 100644
--- a/src/mongo/db/repl/oplog_fetcher.h
+++ b/src/mongo/db/repl/oplog_fetcher.h
@@ -275,7 +275,8 @@ public:
/**
* Returns the `find` query run on the sync source's oplog.
*/
- BSONObj getFindQuery_forTest(long long findTimeout) const;
+ BSONObj getFindQueryFilter_forTest() const;
+ Query getFindQuerySettings_forTest(long long findTimeout) const;
/**
* Returns the OpTime of the last oplog entry fetched and processed.
@@ -389,7 +390,8 @@ private:
* whether this is the initial attempt to create the `find` query to determine what the find
* timeout should be.
*/
- BSONObj _makeFindQuery(long long findTimeout) const;
+ BSONObj _makeFindQueryFilter() const;
+ Query _makeFindQuerySettings(long long findTimeout) const;
/**
* Gets the next batch from the exhaust cursor.
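Per this split, the fetcher now assembles its find from two pieces. A rough sketch built from the hunks above (variable names such as 'conn' and 'batchSize' are assumed, not verbatim from the source):

    // Filter: selects which oplog entries to fetch.
    BSONObj filter = BSON("ts" << BSON("$gte" << lastFetched.getTimestamp()));
    // Settings: shapes the query ($maxTimeMS, hint, readConcern, term, ...).
    Query settings = Query().maxTimeMS(findTimeout).hint(BSON("$natural" << 1));
    // Both are handed to the cursor separately, mirroring _createNewCursor().
    auto cursor = std::make_unique<DBClientCursor>(
        conn, nss, filter, settings, 0 /* nToReturn */, 0 /* nToSkip */,
        nullptr /* fieldsToReturn */, queryOptions, batchSize);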
diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp
index ea790173e49..1e3b9c3813a 100644
--- a/src/mongo/db/repl/oplog_fetcher_test.cpp
+++ b/src/mongo/db/repl/oplog_fetcher_test.cpp
@@ -793,12 +793,14 @@ TEST_F(OplogFetcherTest,
// Test that the correct maxTimeMS is set if this is the initial 'find' query.
auto oplogFetcher = makeOplogFetcher();
auto findTimeout = durationCount<Milliseconds>(oplogFetcher->getInitialFindMaxTime_forTest());
- auto queryObj = oplogFetcher->getFindQuery_forTest(findTimeout);
+
+ auto filter = oplogFetcher->getFindQueryFilter_forTest();
+ ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter);
+
+ auto queryObj =
+ (oplogFetcher->getFindQuerySettings_forTest(findTimeout)).getFullSettingsDeprecated();
ASSERT_EQUALS(60000, queryObj.getIntField("$maxTimeMS"));
- ASSERT_EQUALS(mongo::BSONType::Object, queryObj["query"].type());
- ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())),
- queryObj["query"].Obj());
ASSERT_EQUALS(mongo::BSONType::Object, queryObj["readConcern"].type());
ASSERT_BSONOBJ_EQ(BSON("level"
<< "local"
@@ -812,14 +814,15 @@ TEST_F(OplogFetcherTest,
dataReplicatorExternalState->currentTerm = OpTime::kUninitializedTerm;
auto oplogFetcher = makeOplogFetcher();
+ auto filter = oplogFetcher->getFindQueryFilter_forTest();
+ ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())), filter);
+
// Test that the correct maxTimeMS is set if we are retrying the 'find' query.
auto findTimeout = durationCount<Milliseconds>(oplogFetcher->getRetriedFindMaxTime_forTest());
- auto queryObj = oplogFetcher->getFindQuery_forTest(findTimeout);
+ auto queryObj =
+ (oplogFetcher->getFindQuerySettings_forTest(findTimeout)).getFullSettingsDeprecated();
ASSERT_EQUALS(2000, queryObj.getIntField("$maxTimeMS"));
- ASSERT_EQUALS(mongo::BSONType::Object, queryObj["query"].type());
- ASSERT_BSONOBJ_EQ(BSON("ts" << BSON("$gte" << lastFetched.getTimestamp())),
- queryObj["query"].Obj());
ASSERT_EQUALS(mongo::BSONType::Object, queryObj["readConcern"].type());
ASSERT_BSONOBJ_EQ(BSON("level"
<< "local"
diff --git a/src/mongo/db/repl/oplog_interface_remote.cpp b/src/mongo/db/repl/oplog_interface_remote.cpp
index 2008daae903..1a9e3e6a180 100644
--- a/src/mongo/db/repl/oplog_interface_remote.cpp
+++ b/src/mongo/db/repl/oplog_interface_remote.cpp
@@ -85,6 +85,7 @@ std::unique_ptr<OplogInterface::Iterator> OplogInterfaceRemote::makeIterator() c
const BSONObj fields = BSON("ts" << 1 << "t" << 1LL);
return std::unique_ptr<OplogInterface::Iterator>(
new OplogIteratorRemote(_getConnection()->query(NamespaceString(_collectionName),
+ BSONObj{},
query,
0,
0,
diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp
index e7e3779e97f..8253317329b 100644
--- a/src/mongo/db/repl/primary_only_service.cpp
+++ b/src/mongo/db/repl/primary_only_service.cpp
@@ -657,7 +657,7 @@ void PrimaryOnlyService::_rebuildInstances(long long term) noexcept {
Status(ErrorCodes::InternalError, "Querying state documents failed"));
}
- auto cursor = client.query(ns, Query());
+ auto cursor = client.query(ns, BSONObj{});
while (cursor->more()) {
stateDocuments.push_back(cursor->nextSafe().getOwned());
}
diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp
index 24d885ddaba..4ba3692bb9a 100644
--- a/src/mongo/db/repl/replication_recovery.cpp
+++ b/src/mongo/db/repl/replication_recovery.cpp
@@ -144,7 +144,8 @@ public:
? BSON("$gte" << _oplogApplicationStartPoint << "$lte" << *_oplogApplicationEndPoint)
: BSON("$gte" << _oplogApplicationStartPoint);
_cursor = _client->query(NamespaceString::kRsOplogNamespace,
- QUERY("ts" << predicate),
+ BSON("ts" << predicate),
+ /*querySettings*/ Query(),
/*limit*/ 0,
/*skip*/ 0,
/*projection*/ nullptr,
diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp
index 665955afbba..6a4b3ea84c9 100644
--- a/src/mongo/db/repl/roll_back_local_operations_test.cpp
+++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp
@@ -321,7 +321,8 @@ public:
using DBClientConnection::query;
std::unique_ptr<DBClientCursor> query(const NamespaceStringOrUUID& nsOrUuid,
- Query query,
+ const BSONObj& filter,
+ const Query& querySettings,
int limit,
int nToSkip,
const BSONObj* fieldsToReturn,
diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp
index 639a20df546..ebeda465968 100644
--- a/src/mongo/db/repl/rollback_impl.cpp
+++ b/src/mongo/db/repl/rollback_impl.cpp
@@ -477,10 +477,10 @@ void RollbackImpl::_restoreTxnsTableEntryFromRetryableWrites(OperationContext* o
<< "fromMigrate" << true);
auto cursor = client->query(
NamespaceString::kRsOplogNamespace,
- QUERY("ts" << BSON("$gt" << stableTimestamp) << "txnNumber" << BSON("$exists" << true)
- << "stmtId" << BSON("$exists" << true) << "prevOpTime.ts"
- << BSON("$gte" << Timestamp(1, 0) << "$lte" << stableTimestamp) << "$or"
- << BSON_ARRAY(filter << filterFromMigration)));
+ BSON("ts" << BSON("$gt" << stableTimestamp) << "txnNumber" << BSON("$exists" << true)
+ << "stmtId" << BSON("$exists" << true) << "prevOpTime.ts"
+ << BSON("$gte" << Timestamp(1, 0) << "$lte" << stableTimestamp) << "$or"
+ << BSON_ARRAY(filter << filterFromMigration)));
while (cursor->more()) {
auto doc = cursor->next();
auto swEntry = OplogEntry::parse(doc);
diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp
index 2328705057b..c6184fb7259 100644
--- a/src/mongo/db/repl/rollback_source_impl.cpp
+++ b/src/mongo/db/repl/rollback_source_impl.cpp
@@ -66,9 +66,9 @@ int RollbackSourceImpl::getRollbackId() const {
}
BSONObj RollbackSourceImpl::getLastOperation() const {
- const Query query = Query().sort(BSON("$natural" << -1));
return _getConnection()->findOne(_collectionName,
- query,
+ BSONObj{},
+ Query().sort(BSON("$natural" << -1)),
nullptr,
QueryOption_SecondaryOk,
ReadConcernArgs::kImplicitDefault);
@@ -78,6 +78,7 @@ BSONObj RollbackSourceImpl::findOne(const NamespaceString& nss, const BSONObj& f
return _getConnection()
->findOne(nss.toString(),
filter,
+ Query(),
nullptr,
QueryOption_SecondaryOk,
ReadConcernArgs::kImplicitDefault)
diff --git a/src/mongo/db/repl/tenant_collection_cloner.cpp b/src/mongo/db/repl/tenant_collection_cloner.cpp
index 2409c1e81da..75827e5f6b3 100644
--- a/src/mongo/db/repl/tenant_collection_cloner.cpp
+++ b/src/mongo/db/repl/tenant_collection_cloner.cpp
@@ -212,7 +212,8 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::checkIfDonorCollectionIsE
auto fieldsToReturn = BSON("_id" << 1);
auto cursor =
getClient()->query(_sourceDbAndUuid,
- {} /* Query */,
+ BSONObj{} /* filter */,
+ Query() /* querySettings */,
1 /* limit */,
0 /* skip */,
&fieldsToReturn,
@@ -348,8 +349,8 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::createCollectionStage() {
ON_BLOCK_EXIT([&opCtx] { tenantMigrationRecipientInfo(opCtx.get()) = boost::none; });
auto fieldsToReturn = BSON("_id" << 1);
- _lastDocId =
- client.findOne(_existingNss->ns(), Query().sort(BSON("_id" << -1)), &fieldsToReturn);
+ _lastDocId = client.findOne(
+ _existingNss->ns(), BSONObj{}, Query().sort(BSON("_id" << -1)), &fieldsToReturn);
if (!_lastDocId.isEmpty()) {
// The collection is not empty. Skip creating indexes and resume cloning from the last
// document.
@@ -462,21 +463,21 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::queryStage() {
}
void TenantCollectionCloner::runQuery() {
- auto query = _lastDocId.isEmpty()
- ? QUERY("query" << BSONObj())
- // Use $expr and the aggregation version of $gt to avoid type bracketing.
- : QUERY("$expr" << BSON("$gt" << BSON_ARRAY("$_id" << _lastDocId["_id"])));
- if (_collectionOptions.clusteredIndex) {
+ const BSONObj& filter = _lastDocId.isEmpty()
+ ? BSONObj{} // Use $expr and the aggregation version of $gt to avoid type bracketing.
+ : BSON("$expr" << BSON("$gt" << BSON_ARRAY("$_id" << _lastDocId["_id"])));
+
+ auto query = _collectionOptions.clusteredIndex
// RecordIds are _id values and has no separate _id index
- query.hint(BSON("$natural" << 1));
- } else {
- query.hint(BSON("_id" << 1));
- }
+ ? Query().hint(BSON("$natural" << 1))
+ : Query().hint(BSON("_id" << 1));
+
// Any errors that are thrown here (including NamespaceNotFound) will be handled on the stage
// level.
getClient()->query([this](DBClientCursorBatchIterator& iter) { handleNextBatch(iter); },
_sourceDbAndUuid,
+ filter,
query,
nullptr /* fieldsToReturn */,
QueryOption_NoCursorTimeout | QueryOption_SecondaryOk |
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
index e3e2d429f29..fa3c2053719 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp
@@ -573,6 +573,7 @@ OpTime TenantMigrationRecipientService::Instance::_getDonorMajorityOpTime(
BSON(OplogEntry::kTimestampFieldName << 1 << OplogEntry::kTermFieldName << 1);
auto majorityOpTimeBson =
client->findOne(NamespaceString::kRsOplogNamespace.ns(),
+ BSONObj{},
Query().sort("$natural", -1),
&oplogOpTimeFields,
QueryOption_SecondaryOk,
@@ -867,8 +868,8 @@ void TenantMigrationRecipientService::Instance::_getStartOpTimesFromDonor(WithLo
auto transactionTableOpTimeFields = BSON(SessionTxnRecord::kStartOpTimeFieldName << 1);
auto earliestOpenTransactionBson = _client->findOne(
NamespaceString::kSessionTransactionsTableNamespace.ns(),
- QUERY("state" << BSON("$in" << BSON_ARRAY(preparedState << inProgressState)))
- .sort(SessionTxnRecord::kStartOpTimeFieldName.toString(), 1),
+ BSON("state" << BSON("$in" << BSON_ARRAY(preparedState << inProgressState))),
+ Query().sort(SessionTxnRecord::kStartOpTimeFieldName.toString(), 1),
&transactionTableOpTimeFields,
QueryOption_SecondaryOk,
ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
@@ -1875,6 +1876,7 @@ void TenantMigrationRecipientService::Instance::_fetchAndStoreDonorClusterTimeKe
std::vector<ExternalKeysCollectionDocument> keyDocs;
auto cursor =
_client->query(NamespaceString::kKeysCollectionNamespace,
+ BSONObj{},
Query().readPref(_readPreference.pref, _readPreference.tags.getTagBSON()));
while (cursor->more()) {
const auto doc = cursor->nextSafe().getOwned();
@@ -1892,7 +1894,8 @@ void TenantMigrationRecipientService::Instance::_compareRecipientAndDonorFCV() c
auto donorFCVbson =
_client->findOne(NamespaceString::kServerConfigurationNamespace.ns(),
- QUERY("_id" << FeatureCompatibilityVersionParser::kParameterName),
+ BSON("_id" << FeatureCompatibilityVersionParser::kParameterName),
+ Query(),
nullptr,
QueryOption_SecondaryOk,
ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner());
diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
index bb85e38ae82..0b44a1155f3 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp
@@ -352,7 +352,7 @@ protected:
const std::string& nss,
const std::vector<HostAndPort>& hosts) {
for (const auto& host : hosts) {
- replSet->getNode(host.toString())->remove(nss, Query());
+ replSet->getNode(host.toString())->remove(nss, BSONObj{} /*filter*/);
}
}
diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp
index 6ff76d34087..103a5c4e149 100644
--- a/src/mongo/db/repl/transaction_oplog_application.cpp
+++ b/src/mongo/db/repl/transaction_oplog_application.cpp
@@ -556,8 +556,8 @@ void reconstructPreparedTransactions(OperationContext* opCtx, repl::OplogApplica
DBDirectClient client(opCtx);
const auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("state"
- << "prepared")});
+ BSON("state"
+ << "prepared"));
// Iterate over each entry in the transactions table that has a prepared transaction.
while (cursor->more()) {
diff --git a/src/mongo/db/rs_local_client.cpp b/src/mongo/db/rs_local_client.cpp
index f4a668bed7c..9c6a455e70d 100644
--- a/src/mongo/db/rs_local_client.cpp
+++ b/src/mongo/db/rs_local_client.cpp
@@ -124,18 +124,18 @@ StatusWith<Shard::QueryResponse> RSLocalClient::queryOnce(
}
DBDirectClient client(opCtx);
- Query fullQuery(query);
+ Query querySettings;
if (!sort.isEmpty()) {
- fullQuery.sort(sort);
+ querySettings.sort(sort);
}
if (hint) {
- fullQuery.hint(*hint);
+ querySettings.hint(*hint);
}
- fullQuery.readPref(readPref.pref, BSONArray());
+ querySettings.readPref(readPref.pref, BSONArray());
try {
std::unique_ptr<DBClientCursor> cursor =
- client.query(nss, fullQuery, limit.get_value_or(0));
+ client.query(nss, query, querySettings, limit.get_value_or(0));
if (!cursor) {
return {ErrorCodes::OperationFailed,
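As a usage note on the rewritten queryOnce: settings accumulate on a default-constructed Query and ride next to the untouched filter, instead of being grafted onto a Query that wraps it. A sketch under the same assumptions (the sort, hint, readPref, nss, query, and limit names come from the hunk above):

    Query settings;                          // starts empty: no filter hidden inside
    if (!sort.isEmpty())
        settings.sort(sort);                 // each setter only adds a cursor option
    if (hint)
        settings.hint(*hint);
    settings.readPref(readPref.pref, BSONArray());
    auto cursor = client.query(nss, query, settings, limit.get_value_or(0));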
diff --git a/src/mongo/db/s/chunk_splitter.cpp b/src/mongo/db/s/chunk_splitter.cpp
index ee5fc3b8a16..01b84977aa3 100644
--- a/src/mongo/db/s/chunk_splitter.cpp
+++ b/src/mongo/db/s/chunk_splitter.cpp
@@ -172,6 +172,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
// upper bound. Chunk range upper bounds are exclusive so skip a document to
// make the lower half of the split end up with a single document.
std::unique_ptr<DBClientCursor> cursor = client.query(nss,
+ BSONObj{},
q,
1, /* limit */
1 /* nToSkip */);
@@ -185,7 +186,7 @@ BSONObj findExtremeKeyForShard(OperationContext* opCtx,
end = cursor->next().getOwned();
}
} else {
- end = client.findOne(nss.ns(), q);
+ end = client.findOne(nss.ns(), BSONObj{}, q);
}
if (end.isEmpty()) {
diff --git a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp
index 69fc86df0e6..8279c18a332 100644
--- a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp
+++ b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp
@@ -65,7 +65,7 @@ void assertExistsReshardingDocument(OperationContext* opCtx, UUID reshardingUUID
boost::optional<ReshardingCoordinatorDocument> docOptional;
store.forEach(opCtx,
- QUERY(ReshardingCoordinatorDocument::kReshardingUUIDFieldName << reshardingUUID),
+ BSON(ReshardingCoordinatorDocument::kReshardingUUIDFieldName << reshardingUUID),
[&](const ReshardingCoordinatorDocument& doc) {
docOptional.emplace(doc);
return false;
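PersistentTaskStore::forEach now takes the filter as BSON directly; the callback's return value is the iteration contract, false stops after the current document and true keeps scanning. A hedged sketch of counting matches without materializing them (the store and field name reuse types from later hunks, purely for illustration):

    size_t pendingless = 0;
    store.forEach(opCtx,
                  BSON("pending" << BSON("$exists" << false)),  // filter, formerly QUERY(...)
                  [&](const RangeDeletionTask&) {
                      ++pendingless;
                      return true;  // true == keep iterating over all matches
                  });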
diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
index 8434c02eeaa..d1857fe231a 100644
--- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
+++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp
@@ -163,7 +163,7 @@ DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
(std::string) "^" + pcrecpp::RE::QuoteMeta(dbName.toString()) + "$",
"i");
- auto dbDoc = client.findOne(DatabaseType::ConfigNS.ns(), {queryBuilder.obj()});
+ auto dbDoc = client.findOne(DatabaseType::ConfigNS.ns(), queryBuilder.obj());
auto const [primaryShardPtr, database] = [&] {
if (!dbDoc.isEmpty()) {
auto actualDb = uassertStatusOK(DatabaseType::fromBSON(dbDoc));
diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp
index f514802a291..4ece89a7c0e 100644
--- a/src/mongo/db/s/create_collection_coordinator.cpp
+++ b/src/mongo/db/s/create_collection_coordinator.cpp
@@ -156,7 +156,7 @@ bool checkIfCollectionIsEmpty(OperationContext* opCtx, const NamespaceString& ns
// command doesn't just consult the cached metadata, which may not always be
// correct
DBDirectClient localClient(opCtx);
- return localClient.findOne(nss.ns(), Query()).isEmpty();
+ return localClient.findOne(nss.ns(), BSONObj{}).isEmpty();
}
int getNumShards(OperationContext* opCtx) {
diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
index ffdf12f08c8..72c4000e4eb 100644
--- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
+++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp
@@ -137,8 +137,8 @@ protected:
ASSERT_GT(response["n"].Int(), 0);
}
- void updateDocsInShardedCollection(BSONObj query, BSONObj updated) {
- auto response = client()->updateAcknowledged(kNss.ns(), query, updated);
+ void updateDocsInShardedCollection(BSONObj filter, BSONObj updated) {
+ auto response = client()->updateAcknowledged(kNss.ns(), filter, updated);
ASSERT_OK(getStatusFromWriteCommandReply(response));
ASSERT_GT(response["n"].Int(), 0);
}
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp
index 99a11b8a251..bcb36814568 100644
--- a/src/mongo/db/s/migration_util.cpp
+++ b/src/mongo/db/s/migration_util.cpp
@@ -321,11 +321,11 @@ ChunkRange extendOrTruncateBoundsForMetadata(const CollectionMetadata& metadata,
}
}
-Query overlappingRangeQuery(const ChunkRange& range, const UUID& uuid) {
- return QUERY(RangeDeletionTask::kCollectionUuidFieldName
- << uuid << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey << LT
- << range.getMax() << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMaxKey
- << GT << range.getMin());
+BSONObj overlappingRangeQuery(const ChunkRange& range, const UUID& uuid) {
+ return BSON(RangeDeletionTask::kCollectionUuidFieldName
+ << uuid << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMinKey << LT
+ << range.getMax() << RangeDeletionTask::kRangeFieldName + "." + ChunkRange::kMaxKey
+ << GT << range.getMin());
}
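Since overlappingRangeQuery now returns the raw predicate, callers can hand it straight to the task-store helpers. A sketch of the interval-overlap test it encodes (two ranges overlap iff each one starts below the other's end); the bounds and UUID are illustrative:

    ChunkRange incoming(BSON("x" << 0), BSON("x" << 10));
    BSONObj predicate = overlappingRangeQuery(incoming, collectionUuid);
    // predicate: { collectionUuid: <uuid>, "range.min": { $lt: {x: 10} },
    //              "range.max": { $gt: {x: 0} } }
    auto conflicts = store.count(opCtx, predicate);  // as in checkForConflictingDeletions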
size_t checkForConflictingDeletions(OperationContext* opCtx,
@@ -501,7 +501,7 @@ ExecutorFuture<void> submitRangeDeletionTask(OperationContext* opCtx,
void submitPendingDeletions(OperationContext* opCtx) {
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
- auto query = QUERY("pending" << BSON("$exists" << false));
+ auto query = BSON("pending" << BSON("$exists" << false));
store.forEach(opCtx, query, [&opCtx](const RangeDeletionTask& deletionTask) {
migrationutil::submitRangeDeletionTask(opCtx, deletionTask).getAsync([](auto) {});
@@ -692,7 +692,7 @@ void persistCommitDecision(OperationContext* opCtx,
PersistentTaskStore<MigrationCoordinatorDocument> store(
NamespaceString::kMigrationCoordinatorsNamespace);
store.upsert(opCtx,
- QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()),
+ BSON(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()),
migrationDoc.toBSON());
if (hangInPersistMigrateCommitDecisionThenSimulateErrorUninterruptible.shouldFail()) {
@@ -711,7 +711,7 @@ void persistAbortDecision(OperationContext* opCtx,
PersistentTaskStore<MigrationCoordinatorDocument> store(
NamespaceString::kMigrationCoordinatorsNamespace);
store.upsert(opCtx,
- QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()),
+ BSON(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()),
migrationDoc.toBSON());
if (hangInPersistMigrateAbortDecisionThenSimulateErrorUninterruptible.shouldFail()) {
@@ -748,7 +748,7 @@ void deleteRangeDeletionTaskLocally(OperationContext* opCtx,
const WriteConcernOptions& writeConcern) {
hangInDeleteRangeDeletionLocallyInterruptible.pauseWhileSet(opCtx);
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
- store.remove(opCtx, QUERY(RangeDeletionTask::kIdFieldName << deletionTaskId), writeConcern);
+ store.remove(opCtx, BSON(RangeDeletionTask::kIdFieldName << deletionTaskId), writeConcern);
if (hangInDeleteRangeDeletionLocallyThenSimulateErrorUninterruptible.shouldFail()) {
hangInDeleteRangeDeletionLocallyThenSimulateErrorUninterruptible.pauseWhileSet(opCtx);
@@ -829,7 +829,7 @@ void advanceTransactionOnRecipient(OperationContext* opCtx,
void markAsReadyRangeDeletionTaskLocally(OperationContext* opCtx, const UUID& migrationId) {
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
- auto query = QUERY(RangeDeletionTask::kIdFieldName << migrationId);
+ auto query = BSON(RangeDeletionTask::kIdFieldName << migrationId);
auto update = BSON("$unset" << BSON(RangeDeletionTask::kPendingFieldName << ""));
hangInReadyRangeDeletionLocallyInterruptible.pauseWhileSet(opCtx);
@@ -851,7 +851,7 @@ void deleteMigrationCoordinatorDocumentLocally(OperationContext* opCtx, const UU
PersistentTaskStore<MigrationCoordinatorDocument> store(
NamespaceString::kMigrationCoordinatorsNamespace);
store.remove(opCtx,
- QUERY(MigrationCoordinatorDocument::kIdFieldName << migrationId),
+ BSON(MigrationCoordinatorDocument::kIdFieldName << migrationId),
{1, WriteConcernOptions::SyncMode::UNSET, Seconds(0)});
}
@@ -899,7 +899,7 @@ void resumeMigrationCoordinationsOnStepUp(OperationContext* opCtx) {
PersistentTaskStore<MigrationCoordinatorDocument> store(
NamespaceString::kMigrationCoordinatorsNamespace);
store.forEach(opCtx,
- Query{},
+ BSONObj{},
[&opCtx, &unfinishedMigrationsCount](const MigrationCoordinatorDocument& doc) {
// MigrationCoordinators are only created under the MigrationBlockingGuard,
// which means that only one can possibly exist on an instance at a time.
@@ -973,7 +973,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString nss)
NamespaceString::kMigrationCoordinatorsNamespace);
store.forEach(
opCtx,
- QUERY(MigrationCoordinatorDocument::kNssFieldName << nss.toString()),
+ BSON(MigrationCoordinatorDocument::kNssFieldName << nss.toString()),
[&opCtx, &migrationRecoveryCount](const MigrationCoordinatorDocument& doc) {
LOGV2_DEBUG(4798502,
2,
diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h
index 865056b36a3..780357dbc6d 100644
--- a/src/mongo/db/s/migration_util.h
+++ b/src/mongo/db/s/migration_util.h
@@ -85,7 +85,7 @@ std::shared_ptr<executor::ThreadPoolTaskExecutor> getMigrationUtilExecutor(
* Creates a query object that can be used to find overlapping ranges in the pending range deletions
* collection.
*/
-Query overlappingRangeQuery(const ChunkRange& range, const UUID& uuid);
+BSONObj overlappingRangeQuery(const ChunkRange& range, const UUID& uuid);
/**
* Checks the pending range deletions collection to see if there are any pending ranges that
diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h
index 05db3cb68cb..095dd3c9d55 100644
--- a/src/mongo/db/s/persistent_task_queue.h
+++ b/src/mongo/db/s/persistent_task_queue.h
@@ -110,7 +110,7 @@ PersistentTaskQueue<T>::PersistentTaskQueue(OperationContext* opCtx, NamespaceSt
DBDirectClient client(opCtx);
auto projection = BSON("_id" << 1);
- auto cursor = client.query(_storageNss, Query(), 0, 0, &projection);
+ auto cursor = client.query(_storageNss, BSONObj{}, Query(), 0, 0, &projection);
_count = cursor->itcount();
if (_count > 0)
@@ -203,15 +203,15 @@ bool PersistentTaskQueue<T>::empty(OperationContext* opCtx) const {
template <typename T>
TaskId PersistentTaskQueue<T>::_loadLastId(DBDirectClient& client) {
auto fieldsToReturn = BSON("_id" << 1);
- auto maxId =
- client.findOne(_storageNss.toString(), Query().sort(BSON("_id" << -1)), &fieldsToReturn);
+ auto maxId = client.findOne(
+ _storageNss.toString(), BSONObj{}, Query().sort(BSON("_id" << -1)), &fieldsToReturn);
return maxId.getField("_id").Long();
}
template <typename T>
typename boost::optional<typename BlockingTaskQueue<T>::Record>
PersistentTaskQueue<T>::_loadNextRecord(DBDirectClient& client) {
- auto bson = client.findOne(_storageNss.toString(), Query().sort("_id"));
+ auto bson = client.findOne(_storageNss.toString(), BSONObj{}, Query().sort("_id"));
boost::optional<typename PersistentTaskQueue<T>::Record> result;
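With the filter gone from 'Query', an empty BSONObj{} spells "match everything" and the sort alone decides which end of the _id index is read. A sketch of the min/max idiom above, with an illustrative namespace string:

    // Highest _id: empty filter, descending sort, first document wins.
    auto maxDoc = client.findOne("test.queue", BSONObj{}, Query().sort(BSON("_id" << -1)));
    // Lowest _id: same shape with the ascending string overload, as in _loadNextRecord.
    auto minDoc = client.findOne("test.queue", BSONObj{}, Query().sort("_id"));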
diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp
index 4cc40fb0f3e..5ad24b06cef 100644
--- a/src/mongo/db/s/range_deletion_util.cpp
+++ b/src/mongo/db/s/range_deletion_util.cpp
@@ -270,9 +270,9 @@ void ensureRangeDeletionTaskStillExists(OperationContext* opCtx, const UUID& mig
// for deleting the range deletion task document.
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
auto count = store.count(opCtx,
- QUERY(RangeDeletionTask::kIdFieldName
- << migrationId << RangeDeletionTask::kPendingFieldName
- << BSON("$exists" << false)));
+ BSON(RangeDeletionTask::kIdFieldName
+ << migrationId << RangeDeletionTask::kPendingFieldName
+ << BSON("$exists" << false)));
invariant(count == 0 || count == 1, "found duplicate range deletion tasks");
uassert(ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist,
"Range deletion task no longer exists",
@@ -380,7 +380,7 @@ void removePersistentRangeDeletionTask(const NamespaceString& nss, UUID migratio
withTemporaryOperationContext([&](OperationContext* opCtx) {
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
- store.remove(opCtx, QUERY(RangeDeletionTask::kIdFieldName << migrationId));
+ store.remove(opCtx, BSON(RangeDeletionTask::kIdFieldName << migrationId));
});
}
@@ -413,7 +413,7 @@ std::vector<RangeDeletionTask> getPersistentRangeDeletionTasks(OperationContext*
std::vector<RangeDeletionTask> tasks;
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
- auto query = QUERY(RangeDeletionTask::kNssFieldName << nss.ns());
+ auto query = BSON(RangeDeletionTask::kNssFieldName << nss.ns());
store.forEach(opCtx, query, [&](const RangeDeletionTask& deletionTask) {
tasks.push_back(std::move(deletionTask));
@@ -431,7 +431,7 @@ void snapshotRangeDeletionsForRename(OperationContext* opCtx,
// Clear out any snapshots associated with the target collection: always restart from a
// clean state in case of a stepdown or a killed primary.
PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionForRenameNamespace);
- store.remove(opCtx, QUERY(RangeDeletionTask::kNssFieldName << toNss.ns()));
+ store.remove(opCtx, BSON(RangeDeletionTask::kNssFieldName << toNss.ns()));
auto rangeDeletionTasks = getPersistentRangeDeletionTasks(opCtx, fromNss);
for (auto& task : rangeDeletionTasks) {
@@ -449,7 +449,7 @@ void restoreRangeDeletionTasksForRename(OperationContext* opCtx, const Namespace
PersistentTaskStore<RangeDeletionTask> rangeDeletionsStore(
NamespaceString::kRangeDeletionNamespace);
- const auto query = QUERY(RangeDeletionTask::kNssFieldName << nss.ns());
+ const auto query = BSON(RangeDeletionTask::kNssFieldName << nss.ns());
rangeDeletionsForRenameStore.forEach(opCtx, query, [&](const RangeDeletionTask& deletionTask) {
try {
@@ -467,13 +467,13 @@ void deleteRangeDeletionTasksForRename(OperationContext* opCtx,
// Delete range deletion tasks associated with the source collection
PersistentTaskStore<RangeDeletionTask> rangeDeletionsStore(
NamespaceString::kRangeDeletionNamespace);
- rangeDeletionsStore.remove(opCtx, QUERY(RangeDeletionTask::kNssFieldName << fromNss.ns()));
+ rangeDeletionsStore.remove(opCtx, BSON(RangeDeletionTask::kNssFieldName << fromNss.ns()));
// Delete already-restored snapshots associated with the target collection
PersistentTaskStore<RangeDeletionTask> rangeDeletionsForRenameStore(
NamespaceString::kRangeDeletionForRenameNamespace);
rangeDeletionsForRenameStore.remove(opCtx,
- QUERY(RangeDeletionTask::kNssFieldName << toNss.ns()));
+ BSON(RangeDeletionTask::kNssFieldName << toNss.ns()));
}
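The remove helper keeps the same shape throughout these hunks: a BSON filter selects the documents to delete, and an optional write concern overrides the store default. A minimal sketch, reusing the range-deletion store from above with an illustrative namespace value:

    PersistentTaskStore<RangeDeletionTask> store(NamespaceString::kRangeDeletionNamespace);
    store.remove(opCtx,
                 BSON(RangeDeletionTask::kNssFieldName << nss.ns()),  // filter, formerly QUERY(...)
                 WriteConcernOptions());                              // default write concern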
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index 1b0e75d6327..8a42ae17b8c 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -189,7 +189,7 @@ RangeDeletionTask insertRangeDeletionTask(OperationContext* opCtx, UUID uuid, Ch
// Document should be in the store.
ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, opCtx), 1);
- auto query = QUERY(RangeDeletionTask::kIdFieldName << migrationId);
+ auto query = BSON(RangeDeletionTask::kIdFieldName << migrationId);
t.setPending(boost::none);
auto update = t.toBSON();
store.update(opCtx, query, update);
diff --git a/src/mongo/db/s/recoverable_critical_section_service.cpp b/src/mongo/db/s/recoverable_critical_section_service.cpp
index 371de82a6e7..28393506183 100644
--- a/src/mongo/db/s/recoverable_critical_section_service.cpp
+++ b/src/mongo/db/s/recoverable_critical_section_service.cpp
@@ -359,7 +359,7 @@ void RecoverableCriticalSectionService::recoverRecoverableCriticalSections(
// Map the critical sections that are on disk to memory
PersistentTaskStore<CollectionCriticalSectionDocument> store(
NamespaceString::kCollectionCriticalSectionsNamespace);
- store.forEach(opCtx, Query{}, [&opCtx](const CollectionCriticalSectionDocument& doc) {
+ store.forEach(opCtx, BSONObj{}, [&opCtx](const CollectionCriticalSectionDocument& doc) {
const auto& nss = doc.getNss();
{
AutoGetCollection collLock(opCtx, nss, MODE_X);
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
index 61ddc7e13cd..46f815acd76 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp
@@ -228,7 +228,8 @@ public:
ReshardingCoordinatorDocument getCoordinatorDoc(OperationContext* opCtx) {
DBDirectClient client(opCtx);
- auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), {});
+ auto doc =
+ client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(), BSONObj{});
IDLParserErrorContext errCtx("reshardingCoordFromTest");
return ReshardingCoordinatorDocument::parse(errCtx, doc);
}
diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
index 0b2d6b162ba..d970eff8635 100644
--- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp
@@ -237,7 +237,7 @@ protected:
OperationContext* opCtx, ReshardingCoordinatorDocument expectedCoordinatorDoc) {
DBDirectClient client(opCtx);
auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(),
- Query(BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns())));
+ BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()));
auto coordinatorDoc = ReshardingCoordinatorDocument::parse(
IDLParserErrorContext("ReshardingCoordinatorTest"), doc);
@@ -319,7 +319,7 @@ protected:
const ReshardingCoordinatorDocument& expectedCoordinatorDoc) {
DBDirectClient client(opCtx);
CollectionType onDiskEntry(
- client.findOne(CollectionType::ConfigNS.ns(), Query(BSON("_id" << _originalNss.ns()))));
+ client.findOne(CollectionType::ConfigNS.ns(), BSON("_id" << _originalNss.ns())));
ASSERT_EQUALS(onDiskEntry.getAllowMigrations(), expectedCollType.getAllowMigrations());
@@ -378,8 +378,7 @@ protected:
void assertTemporaryCollectionCatalogEntryMatchesExpected(
OperationContext* opCtx, boost::optional<CollectionType> expectedCollType) {
DBDirectClient client(opCtx);
- auto doc =
- client.findOne(CollectionType::ConfigNS.ns(), Query(BSON("_id" << _tempNss.ns())));
+ auto doc = client.findOne(CollectionType::ConfigNS.ns(), BSON("_id" << _tempNss.ns()));
if (!expectedCollType) {
ASSERT(doc.isEmpty());
return;
@@ -423,7 +422,7 @@ protected:
const Timestamp& collTimestamp) {
DBDirectClient client(opCtx);
std::vector<ChunkType> foundChunks;
- auto cursor = client.query(ChunkType::ConfigNS, Query(BSON("uuid" << uuid)));
+ auto cursor = client.query(ChunkType::ConfigNS, BSON("uuid" << uuid));
while (cursor->more()) {
auto d = uassertStatusOK(
ChunkType::fromConfigBSON(cursor->nextSafe().getOwned(), collEpoch, collTimestamp));
@@ -449,7 +448,7 @@ protected:
DBDirectClient client(opCtx);
std::vector<TagsType> foundZones;
- auto cursor = client.query(TagsType::ConfigNS, Query(BSON("ns" << nss.ns())));
+ auto cursor = client.query(TagsType::ConfigNS, BSON("ns" << nss.ns()));
while (cursor->more()) {
foundZones.push_back(
uassertStatusOK(TagsType::fromBSON(cursor->nextSafe().getOwned())));
@@ -618,11 +617,10 @@ protected:
// Check that chunks and tags under the temp namespace have been removed
DBDirectClient client(opCtx);
- auto chunkDoc =
- client.findOne(ChunkType::ConfigNS.ns(), Query(BSON("ns" << _tempNss.ns())));
+ auto chunkDoc = client.findOne(ChunkType::ConfigNS.ns(), BSON("ns" << _tempNss.ns()));
ASSERT(chunkDoc.isEmpty());
- auto tagDoc = client.findOne(TagsType::ConfigNS.ns(), Query(BSON("ns" << _tempNss.ns())));
+ auto tagDoc = client.findOne(TagsType::ConfigNS.ns(), BSON("ns" << _tempNss.ns()));
ASSERT(tagDoc.isEmpty());
// Check that chunks and tags entries previously under the temporary namespace have been
@@ -642,7 +640,7 @@ protected:
// Check that the entry is removed from config.reshardingOperations
DBDirectClient client(opCtx);
auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace.ns(),
- Query(BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns())));
+ BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns()));
ASSERT(doc.isEmpty());
// Check that the resharding fields are removed from the config.collections entry and
diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
index 2fed3542cd0..3461c15b815 100644
--- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
@@ -105,8 +105,8 @@ void ensureOplogCollectionsDropped(OperationContext* opCtx,
NamespaceString::kReshardingApplierProgressNamespace);
oplogApplierProgressStore.remove(
opCtx,
- QUERY(ReshardingOplogApplierProgress::kOplogSourceIdFieldName
- << reshardingSourceId.toBSON()),
+ BSON(ReshardingOplogApplierProgress::kOplogSourceIdFieldName
+ << reshardingSourceId.toBSON()),
WriteConcernOptions());
// Remove the txn cloner progress doc for this donor.
@@ -114,7 +114,7 @@ void ensureOplogCollectionsDropped(OperationContext* opCtx,
NamespaceString::kReshardingTxnClonerProgressNamespace);
txnClonerProgressStore.remove(
opCtx,
- QUERY(ReshardingTxnClonerProgress::kSourceIdFieldName << reshardingSourceId.toBSON()),
+ BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << reshardingSourceId.toBSON()),
WriteConcernOptions());
// Drop the conflict stash collection for this donor.
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
index 2e9358925a7..853f71024b6 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp
@@ -312,7 +312,7 @@ void clearFilteringMetadata(OperationContext* opCtx, bool scheduleAsyncRefresh)
NamespaceString::kRecipientReshardingOperationsNamespace}) {
PersistentTaskStore<CommonReshardingMetadata> store(homeToReshardingDocs);
- store.forEach(opCtx, Query(), [&](CommonReshardingMetadata reshardingDoc) -> bool {
+ store.forEach(opCtx, BSONObj{}, [&](CommonReshardingMetadata reshardingDoc) -> bool {
namespacesToRefresh.insert(reshardingDoc.getSourceNss());
namespacesToRefresh.insert(reshardingDoc.getTempReshardingNss());
diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
index 202e5898b79..47e35c202e7 100644
--- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp
@@ -693,7 +693,7 @@ TEST_F(ReshardingDonorServiceTest, TruncatesXLErrorOnDonorDocument) {
NamespaceString::kDonorReshardingOperationsNamespace);
store.forEach(
opCtx.get(),
- QUERY(ReshardingDonorDocument::kReshardingUUIDFieldName << doc.getReshardingUUID()),
+ BSON(ReshardingDonorDocument::kReshardingUUIDFieldName << doc.getReshardingUUID()),
[&](const auto& donorDocument) {
persistedDonorDocument.emplace(donorDocument);
return false;
diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
index cb6efb31d30..7cd00b1f637 100644
--- a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
+++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp
@@ -132,7 +132,7 @@ ReshardingCleaner<Service, StateMachine, ReshardingDocument>::_fetchReshardingDo
OperationContext* opCtx) {
boost::optional<ReshardingDocument> docOptional;
_store.forEach(opCtx,
- QUERY(ReshardingDocument::kReshardingUUIDFieldName << _reshardingUUID),
+ BSON(ReshardingDocument::kReshardingUUIDFieldName << _reshardingUUID),
[&](const ReshardingDocument& doc) {
docOptional.emplace(doc);
return false;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
index ad2f2e399ad..1e47f26b6c0 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp
@@ -230,7 +230,7 @@ void ReshardingOplogApplier::_clearAppliedOpsAndStoreProgress(OperationContext*
store.upsert(
opCtx,
- QUERY(ReshardingOplogApplierProgress::kOplogSourceIdFieldName << _sourceId.toBSON()),
+ BSON(ReshardingOplogApplierProgress::kOplogSourceIdFieldName << _sourceId.toBSON()),
builder.obj());
_env->metrics()->onOplogEntriesApplied(_currentBatchToApply.size());
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index c97b47ffce8..c21596f18e5 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -229,7 +229,7 @@ public:
std::vector<repl::DurableOplogEntry> result;
PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace);
- store.forEach(opCtx, QUERY("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) {
+ store.forEach(opCtx, BSON("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) {
result.emplace_back(
unittest::assertGet(repl::DurableOplogEntry::parse(oplogEntry.toBSON())));
return true;
@@ -245,7 +245,7 @@ public:
PersistentTaskStore<SessionTxnRecord> store(
NamespaceString::kSessionTransactionsTableNamespace);
store.forEach(opCtx,
- QUERY(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()),
+ BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()),
[&](const auto& sessionTxnRecord) {
result.emplace(sessionTxnRecord);
return false;
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index b933e609fc1..a88783eda55 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -206,27 +206,26 @@ public:
std::vector<SlimApplyOpsInfo> result;
PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace);
- store.forEach(
- opCtx,
- QUERY("op"
- << "c"
- << "o.applyOps" << BSON("$exists" << true) << "ts" << BSON("$gt" << ts)),
- [&](const auto& oplogEntry) {
- auto applyOpsCmd = oplogEntry.getObject().getOwned();
- auto applyOpsInfo = repl::ApplyOpsCommandInfo::parse(applyOpsCmd);
-
- std::vector<repl::DurableReplOperation> operations;
- operations.reserve(applyOpsInfo.getOperations().size());
-
- for (const auto& innerOp : applyOpsInfo.getOperations()) {
- operations.emplace_back(
- repl::DurableReplOperation::parse({"findApplyOpsNewerThan"}, innerOp));
- }
-
- result.emplace_back(
- SlimApplyOpsInfo{std::move(applyOpsCmd), std::move(operations)});
- return true;
- });
+ store.forEach(opCtx,
+ BSON("op"
+ << "c"
+ << "o.applyOps" << BSON("$exists" << true) << "ts" << BSON("$gt" << ts)),
+ [&](const auto& oplogEntry) {
+ auto applyOpsCmd = oplogEntry.getObject().getOwned();
+ auto applyOpsInfo = repl::ApplyOpsCommandInfo::parse(applyOpsCmd);
+
+ std::vector<repl::DurableReplOperation> operations;
+ operations.reserve(applyOpsInfo.getOperations().size());
+
+ for (const auto& innerOp : applyOpsInfo.getOperations()) {
+ operations.emplace_back(repl::DurableReplOperation::parse(
+ {"findApplyOpsNewerThan"}, innerOp));
+ }
+
+ result.emplace_back(
+ SlimApplyOpsInfo{std::move(applyOpsCmd), std::move(operations)});
+ return true;
+ });
return result;
}
diff --git a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp
index 14aed43039a..61a519c7493 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp
@@ -174,7 +174,7 @@ public:
std::vector<repl::DurableOplogEntry> result;
PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace);
- store.forEach(opCtx, QUERY("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) {
+ store.forEach(opCtx, BSON("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) {
result.emplace_back(
unittest::assertGet(repl::DurableOplogEntry::parse(oplogEntry.toBSON())));
return true;
@@ -190,7 +190,7 @@ public:
PersistentTaskStore<SessionTxnRecord> store(
NamespaceString::kSessionTransactionsTableNamespace);
store.forEach(opCtx,
- QUERY(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()),
+ BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()),
[&](const auto& sessionTxnRecord) {
result.emplace(sessionTxnRecord);
return false;
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 481d04a3556..5640ea3bac9 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -627,8 +627,8 @@ TEST_F(ReshardingRecipientServiceTest, TruncatesXLErrorOnRecipientDocument) {
PersistentTaskStore<ReshardingRecipientDocument> store(
NamespaceString::kRecipientReshardingOperationsNamespace);
store.forEach(opCtx.get(),
- QUERY(ReshardingRecipientDocument::kReshardingUUIDFieldName
- << doc.getReshardingUUID()),
+ BSON(ReshardingRecipientDocument::kReshardingUUIDFieldName
+ << doc.getReshardingUUID()),
[&](const auto& recipientDocument) {
persistedRecipientDocument.emplace(recipientDocument);
return false;
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
index c0600fbbc5d..2ffa4e40ab0 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp
@@ -111,7 +111,7 @@ boost::optional<LogicalSessionId> ReshardingTxnCloner::_fetchProgressLsid(Operat
boost::optional<LogicalSessionId> progressLsid;
store.forEach(opCtx,
- QUERY(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()),
+ BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()),
[&](const auto& doc) {
progressLsid = doc.getProgress();
return false;
@@ -184,7 +184,7 @@ void ReshardingTxnCloner::_updateProgressDocument(OperationContext* opCtx,
store.upsert(
opCtx,
- QUERY(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()),
+ BSON(ReshardingTxnClonerProgress::kSourceIdFieldName << _sourceId.toBSON()),
BSON("$set" << BSON(ReshardingTxnClonerProgress::kProgressFieldName << progress.toBSON())),
{1, WriteConcernOptions::SyncMode::UNSET, Seconds(0)});
}
diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
index 1b22d39f53c..e302f943693 100644
--- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp
@@ -253,9 +253,10 @@ protected:
DBDirectClient client(operationContext());
// The same logical session entry may be inserted more than once by a test case, so use a
// $natural sort to find the most recently inserted entry.
- Query oplogQuery(BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON()));
- auto bsonOplog = client.findOne(NamespaceString::kRsOplogNamespace.ns(),
- oplogQuery.sort(BSON("$natural" << -1)));
+ auto bsonOplog =
+ client.findOne(NamespaceString::kRsOplogNamespace.ns(),
+ BSON(repl::OplogEntryBase::kSessionIdFieldName << sessionId.toBSON()),
+ Query().sort(BSON("$natural" << -1)));
ASSERT(!bsonOplog.isEmpty());
auto oplogEntry = repl::MutableOplogEntry::parse(bsonOplog).getValue();
ASSERT_EQ(oplogEntry.getTxnNumber().get(), txnNum);
@@ -265,7 +266,7 @@ protected:
auto bsonTxn =
client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
- {BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON())});
+ BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON()));
ASSERT(!bsonTxn.isEmpty());
auto txn = SessionTxnRecord::parse(
IDLParserErrorContext("resharding config transactions cloning test"), bsonTxn);
@@ -422,7 +423,7 @@ protected:
std::vector<repl::DurableOplogEntry> result;
PersistentTaskStore<repl::OplogEntryBase> store(NamespaceString::kRsOplogNamespace);
- store.forEach(opCtx, QUERY("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) {
+ store.forEach(opCtx, BSON("ts" << BSON("$gt" << ts)), [&](const auto& oplogEntry) {
result.emplace_back(
unittest::assertGet(repl::DurableOplogEntry::parse(oplogEntry.toBSON())));
return true;
@@ -438,7 +439,7 @@ protected:
PersistentTaskStore<SessionTxnRecord> store(
NamespaceString::kSessionTransactionsTableNamespace);
store.forEach(opCtx,
- QUERY(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()),
+ BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()),
[&](const auto& sessionTxnRecord) {
result.emplace(sessionTxnRecord);
return false;
diff --git a/src/mongo/db/s/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding_destined_recipient_test.cpp
index 362a88e9c30..8943f73ffb9 100644
--- a/src/mongo/db/s/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding_destined_recipient_test.cpp
@@ -391,7 +391,7 @@ TEST_F(DestinedRecipientTest, TestOpObserverSetsDestinedRecipientOnMultiUpdates)
OperationShardingState::get(opCtx).initializeClientRoutingVersions(
kNss, ChunkVersion::IGNORED(), env.dbVersion);
client.update(kNss.ns(),
- Query{BSON("x" << 0)},
+ BSON("x" << 0),
BSON("$set" << BSON("z" << 5)),
false /*upsert*/,
true /*multi*/);
diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
index 1768d029e9e..7d770e47cff 100644
--- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp
+++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp
@@ -1994,7 +1994,7 @@ TEST_F(SessionCatalogMigrationDestinationTest, MigratingKnownStmtWhileOplogTrunc
{
// Confirm that oplog is indeed empty.
DBDirectClient client(opCtx);
- auto result = client.findOne(NamespaceString::kRsOplogNamespace.ns(), {});
+ auto result = client.findOne(NamespaceString::kRsOplogNamespace.ns(), BSONObj{});
ASSERT_TRUE(result.isEmpty());
}
diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp
index 38c1723adab..c257fc08404 100644
--- a/src/mongo/db/s/session_catalog_migration_source.cpp
+++ b/src/mongo/db/s/session_catalog_migration_source.cpp
@@ -63,6 +63,7 @@ boost::optional<repl::OplogEntry> forgeNoopEntryFromImageCollection(
BSONObj imageObj =
client.findOne(NamespaceString::kConfigImagesNamespace.ns(),
BSON("_id" << retryableFindAndModifyOplogEntry.getSessionId()->toBSON()),
+ Query(),
nullptr);
if (imageObj.isEmpty()) {
return boost::none;
@@ -124,7 +125,7 @@ boost::optional<repl::OplogEntry> fetchPrePostImageOplog(OperationContext* opCtx
auto opTime = opTimeToFetch.value();
DBDirectClient client(opCtx);
auto oplogBSON =
- client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), nullptr);
+ client.findOne(NamespaceString::kRsOplogNamespace.ns(), opTime.asQuery(), Query(), nullptr);
return uassertStatusOK(repl::OplogEntry::parse(oplogBSON));
}
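A usage note before the next hunk: cursor consumption is untouched by this refactor, only the explicit filter argument is new. A sketch of the sorted session scan that the following hunk rewrites (the parse context name is a stand-in):

    auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
                               BSONObj{} /* filter: every session */,
                               Query().sort(BSON("_id" << 1)));
    while (cursor->more()) {
        auto record = SessionTxnRecord::parse(IDLParserErrorContext("sketch"),
                                              cursor->nextSafe().getOwned());
        // ... inspect record ...
    }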
@@ -192,13 +193,12 @@ SessionCatalogMigrationSource::SessionCatalogMigrationSource(OperationContext* o
_rollbackIdAtInit(repl::ReplicationProcess::get(opCtx)->getRollbackID()),
_chunkRange(std::move(chunk)),
_keyPattern(shardKey) {
- Query query;
// Sort is not needed for correctness. This is just for making it easier to write deterministic
// tests.
- query.sort(BSON("_id" << 1));
-
DBDirectClient client(opCtx);
- auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, query);
+ auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
+ BSONObj{},
+ Query().sort(BSON("_id" << 1)));
while (cursor->more()) {
auto nextSession = SessionTxnRecord::parse(
@@ -422,7 +422,7 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op
DBDirectClient client(opCtx);
const auto& newWriteOplogDoc = client.findOne(
- NamespaceString::kRsOplogNamespace.ns(), nextOpTimeToFetch.asQuery(), nullptr);
+ NamespaceString::kRsOplogNamespace.ns(), nextOpTimeToFetch.asQuery(), Query(), nullptr);
uassert(40620,
str::stream() << "Unable to fetch oplog entry with opTime: "
diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp
index 8641111c168..601a1624208 100644
--- a/src/mongo/db/s/shard_key_util.cpp
+++ b/src/mongo/db/s/shard_key_util.cpp
@@ -227,7 +227,7 @@ void ValidationBehaviorsShardCollection::verifyCanCreateShardKeyIndex(
uassert(ErrorCodes::InvalidOptions,
"Please create an index that starts with the proposed shard key before "
"sharding the collection",
- _localClient->findOne(nss.ns(), Query()).isEmpty());
+ _localClient->findOne(nss.ns(), BSONObj{}).isEmpty());
}
void ValidationBehaviorsShardCollection::createShardKeyIndex(
diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp
index d7bf11091bf..12e25a256e5 100644
--- a/src/mongo/db/s/shard_metadata_util.cpp
+++ b/src/mongo/db/s/shard_metadata_util.cpp
@@ -137,12 +137,13 @@ StatusWith<RefreshState> getPersistedRefreshFlags(OperationContext* opCtx,
StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCtx,
const NamespaceString& nss) {
- Query fullQuery(BSON(ShardCollectionType::kNssFieldName << nss.ns()));
-
try {
DBDirectClient client(opCtx);
std::unique_ptr<DBClientCursor> cursor =
- client.query(NamespaceString::kShardConfigCollectionsNamespace, fullQuery, 1);
+ client.query(NamespaceString::kShardConfigCollectionsNamespace,
+ BSON(ShardCollectionType::kNssFieldName << nss.ns()),
+ Query(),
+ 1);
if (!cursor) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Failed to establish a cursor for reading "
@@ -165,12 +166,13 @@ StatusWith<ShardCollectionType> readShardCollectionsEntry(OperationContext* opCt
}
StatusWith<ShardDatabaseType> readShardDatabasesEntry(OperationContext* opCtx, StringData dbName) {
- Query fullQuery(BSON(ShardDatabaseType::name() << dbName.toString()));
-
try {
DBDirectClient client(opCtx);
std::unique_ptr<DBClientCursor> cursor =
- client.query(NamespaceString::kShardConfigDatabasesNamespace, fullQuery, 1);
+ client.query(NamespaceString::kShardConfigDatabasesNamespace,
+ BSON(ShardDatabaseType::name() << dbName.toString()),
+ Query(),
+ 1);
if (!cursor) {
return Status(ErrorCodes::OperationFailed,
str::stream() << "Failed to establish a cursor for reading "
@@ -292,11 +294,8 @@ StatusWith<std::vector<ChunkType>> readShardChunks(OperationContext* opCtx,
try {
DBDirectClient client(opCtx);
- Query fullQuery(query);
- fullQuery.sort(sort);
-
std::unique_ptr<DBClientCursor> cursor =
- client.query(chunksNss, fullQuery, limit.get_value_or(0));
+ client.query(chunksNss, query, Query().sort(sort), limit.get_value_or(0));
uassert(ErrorCodes::OperationFailed,
str::stream() << "Failed to establish a cursor for reading " << chunksNss.ns()
<< " from local storage",
diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp
index 1739c32da5d..814517f17d9 100644
--- a/src/mongo/db/s/shard_metadata_util_test.cpp
+++ b/src/mongo/db/s/shard_metadata_util_test.cpp
@@ -139,12 +139,13 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture {
try {
DBDirectClient client(operationContext());
for (auto& chunk : chunks) {
- Query query(BSON(ChunkType::minShardID()
- << chunk.getMin() << ChunkType::max() << chunk.getMax()));
- query.readPref(ReadPreference::Nearest, BSONArray());
-
NamespaceString chunkMetadataNss{ChunkType::ShardNSPrefix + uuid.toString()};
- std::unique_ptr<DBClientCursor> cursor = client.query(chunkMetadataNss, query, 1);
+ std::unique_ptr<DBClientCursor> cursor =
+ client.query(chunkMetadataNss,
+ BSON(ChunkType::minShardID()
+ << chunk.getMin() << ChunkType::max() << chunk.getMax()),
+ Query().readPref(ReadPreference::Nearest, BSONArray()),
+ 1);
ASSERT(cursor);
ASSERT(cursor->more());
diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp
index 7eb847f789d..9b9fdc950ca 100644
--- a/src/mongo/db/s/sharding_ddl_util_test.cpp
+++ b/src/mongo/db/s/sharding_ddl_util_test.cpp
@@ -83,12 +83,14 @@ const NamespaceString kToNss("test.to");
// Query 'limit' objects from the database into an array.
void findN(DBClientBase& client,
const std::string& ns,
- Query query,
+ const BSONObj& filter,
+ const Query& querySettings,
int limit,
std::vector<BSONObj>& out) {
out.reserve(limit);
std::unique_ptr<DBClientCursor> c = client.query(NamespaceString(ns),
- std::move(query),
+ filter,
+ querySettings,
limit,
0 /*nToSkip*/,
nullptr /*fieldsToReturn*/,
@@ -109,9 +111,9 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
DBDirectClient client(opCtx);
const NamespaceString fromNss("test.from");
- const auto fromCollQuery = Query(BSON(CollectionType::kNssFieldName << fromNss.ns()));
+ const auto fromCollQuery = BSON(CollectionType::kNssFieldName << fromNss.ns());
- const auto toCollQuery = Query(BSON(CollectionType::kNssFieldName << kToNss.ns()));
+ const auto toCollQuery = BSON(CollectionType::kNssFieldName << kToNss.ns());
const Timestamp collTimestamp(1);
const auto collUUID = UUID::gen();
@@ -156,10 +158,13 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
// Get FROM collection document and chunks
auto fromDoc = client.findOne(CollectionType::ConfigNS.ns(), fromCollQuery);
CollectionType fromCollection(fromDoc);
- auto fromChunksQuery =
- Query(BSON(ChunkType::collectionUUID << collUUID)).sort(BSON("_id" << 1));
std::vector<BSONObj> fromChunks;
- findN(client, ChunkType::ConfigNS.ns(), fromChunksQuery, nChunks, fromChunks);
+ findN(client,
+ ChunkType::ConfigNS.ns(),
+ BSON(ChunkType::collectionUUID << collUUID) /*filter*/,
+ Query().sort(BSON("_id" << 1)),
+ nChunks,
+ fromChunks);
auto fromCollType = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, fromNss);
// Perform the metadata rename
@@ -171,11 +176,14 @@ TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) {
// Get TO collection document and chunks
auto toDoc = client.findOne(CollectionType::ConfigNS.ns(), toCollQuery);
- const auto toChunksQuery =
- Query(BSON(ChunkType::collectionUUID << collUUID)).sort(BSON("_id" << 1));
CollectionType toCollection(toDoc);
std::vector<BSONObj> toChunks;
- findN(client, ChunkType::ConfigNS.ns(), toChunksQuery, nChunks, toChunks);
+ findN(client,
+ ChunkType::ConfigNS.ns(),
+ BSON(ChunkType::collectionUUID << collUUID) /*filter*/,
+ Query().sort(BSON("_id" << 1)),
+ nChunks,
+ toChunks);
// Check that original epoch/timestamp are changed in config.collections entry
ASSERT(fromCollection.getEpoch() != toCollection.getEpoch());
diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp
index 5b060c8d968..924b9920b37 100644
--- a/src/mongo/db/s/transaction_coordinator_test.cpp
+++ b/src/mongo/db/s/transaction_coordinator_test.cpp
@@ -137,7 +137,7 @@ protected:
void waitUntilCoordinatorDocIsPresent() {
DBDirectClient dbClient(operationContext());
- while (dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), Query())
+ while (dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), BSONObj{})
.isEmpty())
;
}
@@ -151,13 +151,14 @@ protected:
do {
doc = TransactionCoordinatorDocument::parse(
IDLParserErrorContext("dummy"),
- dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), Query()));
+ dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(),
+ BSONObj{}));
} while (!doc.getDecision());
}
void waitUntilNoCoordinatorDocIsPresent() {
DBDirectClient dbClient(operationContext());
- while (!dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), Query())
+ while (!dbClient.findOne(NamespaceString::kTransactionCoordinatorsNamespace.ns(), BSONObj{})
.isEmpty())
;
}
diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp b/src/mongo/db/s/transaction_coordinator_util.cpp
index 0bc0d019c6c..9e07378295d 100644
--- a/src/mongo/db/s/transaction_coordinator_util.cpp
+++ b/src/mongo/db/s/transaction_coordinator_util.cpp
@@ -163,7 +163,7 @@ repl::OpTime persistParticipantListBlocking(OperationContext* opCtx,
// changed since the update above ran.
const auto doc = client.findOne(
NamespaceString::kTransactionCoordinatorsNamespace.toString(),
- QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
+ BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51025,
str::stream() << "While attempting to write participant list "
<< buildParticipantListString(participantList) << " for "
@@ -375,7 +375,7 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx,
// changed since the update above ran.
const auto doc = client.findOne(
NamespaceString::kTransactionCoordinatorsNamespace.ns(),
- QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
+ BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51026,
str::stream() << "While attempting to write decision "
<< (isCommit ? "'commit'" : "'abort'") << " for" << lsid.getId()
@@ -542,7 +542,7 @@ void deleteCoordinatorDocBlocking(OperationContext* opCtx,
// changed since the update above ran.
const auto doc = client.findOne(
NamespaceString::kTransactionCoordinatorsNamespace.toString(),
- QUERY(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
+ BSON(TransactionCoordinatorDocument::kIdFieldName << sessionInfo.toBSON()));
uasserted(51027,
str::stream() << "While attempting to delete document for " << lsid.getId() << ':'
<< txnNumber
@@ -591,7 +591,7 @@ std::vector<TransactionCoordinatorDocument> readAllCoordinatorDocs(OperationCont
DBDirectClient client(opCtx);
auto coordinatorDocsCursor =
- client.query(NamespaceString::kTransactionCoordinatorsNamespace, Query{});
+ client.query(NamespaceString::kTransactionCoordinatorsNamespace, BSONObj{});
while (coordinatorDocsCursor->more()) {
// TODO (SERVER-38307): Try/catch around parsing the document and skip the document if it
diff --git a/src/mongo/db/s/vector_clock_shard_server_test.cpp b/src/mongo/db/s/vector_clock_shard_server_test.cpp
index 22efd956506..1a62bf34d84 100644
--- a/src/mongo/db/s/vector_clock_shard_server_test.cpp
+++ b/src/mongo/db/s/vector_clock_shard_server_test.cpp
@@ -292,8 +292,8 @@ protected:
}
};
-const Query kVectorClockQuery = QUERY("_id"
- << "vectorClockState");
+const BSONObj kVectorClockQuery = BSON("_id"
+ << "vectorClockState");
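The hunk above is the macro swap in its purest form: QUERY built a 'Query' around inline BSON, while the filter is now the BSON object itself. A before/after sketch, with the "before" spelling shown only as a comment since it is what these hunks migrate away from:

    // Before: const Query kVectorClockQuery = QUERY("_id" << "vectorClockState");
    // After: the filter is plain BSON; any settings would ride in a separate Query().
    const BSONObj kVectorClockQuery = BSON("_id"
                                           << "vectorClockState");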
TEST_F(VectorClockPersistenceTest, PrimaryPersistVectorClockDocument) {
auto sc = getServiceContext();
diff --git a/src/mongo/db/session_catalog_mongod.cpp b/src/mongo/db/session_catalog_mongod.cpp
index 7fa62f5363a..075e5bda2cc 100644
--- a/src/mongo/db/session_catalog_mongod.cpp
+++ b/src/mongo/db/session_catalog_mongod.cpp
@@ -239,9 +239,9 @@ void createRetryableFindAndModifyTable(OperationContext* opCtx) {
void abortInProgressTransactions(OperationContext* opCtx) {
DBDirectClient client(opCtx);
- Query query(BSON(SessionTxnRecord::kStateFieldName
- << DurableTxnState_serializer(DurableTxnStateEnum::kInProgress)));
- auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, query);
+ auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
+ BSON(SessionTxnRecord::kStateFieldName << DurableTxnState_serializer(
+ DurableTxnStateEnum::kInProgress)));
if (cursor->more()) {
LOGV2_DEBUG(21977, 3, "Aborting in-progress transactions on stepup.");
}
@@ -428,12 +428,12 @@ int MongoDSessionCatalog::reapSessionsOlderThan(OperationContext* opCtx,
// Scan for records older than the minimum lifetime and use a sort to walk the '_id' index
DBDirectClient client(opCtx);
- auto cursor =
- client.query(NamespaceString::kSessionTransactionsTableNamespace,
- Query(BSON(kLastWriteDateFieldName << LT << possiblyExpired)).sort(kSortById),
- 0,
- 0,
- &kIdProjection);
+ auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
+ BSON(kLastWriteDateFieldName << LT << possiblyExpired),
+ Query().sort(kSortById),
+ 0,
+ 0,
+ &kIdProjection);
// The max batch size is chosen so that a single batch won't exceed the 16MB BSON object size
// limit
diff --git a/src/mongo/db/transaction_participant.cpp b/src/mongo/db/transaction_participant.cpp
index a11860b17e6..a9cfe387ad9 100644
--- a/src/mongo/db/transaction_participant.cpp
+++ b/src/mongo/db/transaction_participant.cpp
@@ -140,9 +140,8 @@ ActiveTransactionHistory fetchActiveTransactionHistory(OperationContext* opCtx,
// field has been set, bumping the global lock acquisition to an IX. That upconvert would
// require a flow control ticket to be obtained.
FlowControl::Bypass flowControlBypass(opCtx);
- auto result =
- client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
- {BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON())});
+ auto result = client.findOne(NamespaceString::kSessionTransactionsTableNamespace.ns(),
+ BSON(SessionTxnRecord::kSessionIdFieldName << lsid.toBSON()));
if (result.isEmpty()) {
return boost::none;
}
diff --git a/src/mongo/db/transaction_participant_retryable_writes_test.cpp b/src/mongo/db/transaction_participant_retryable_writes_test.cpp
index 5b4ce8b1d49..c2582f6ba60 100644
--- a/src/mongo/db/transaction_participant_retryable_writes_test.cpp
+++ b/src/mongo/db/transaction_participant_retryable_writes_test.cpp
@@ -258,7 +258,7 @@ protected:
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << session->getSessionId().toBSON())});
+ BSON("_id" << session->getSessionId().toBSON()));
ASSERT(cursor);
ASSERT(cursor->more());
@@ -296,7 +296,7 @@ TEST_F(TransactionParticipantRetryableWritesTest, SessionEntryNotWrittenOnBegin)
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << sessionId.toBSON())});
+ BSON("_id" << sessionId.toBSON()));
ASSERT(cursor);
ASSERT(!cursor->more());
}
@@ -313,7 +313,7 @@ TEST_F(TransactionParticipantRetryableWritesTest, SessionEntryWrittenAtFirstWrit
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << sessionId.toBSON())});
+ BSON("_id" << sessionId.toBSON()));
ASSERT(cursor);
ASSERT(cursor->more());
@@ -339,7 +339,7 @@ TEST_F(TransactionParticipantRetryableWritesTest,
DBDirectClient client(opCtx());
auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
- {BSON("_id" << sessionId.toBSON())});
+ BSON("_id" << sessionId.toBSON()));
ASSERT(cursor);
ASSERT(cursor->more());
diff --git a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp
index 157b0d32593..16ecec730c9 100644
--- a/src/mongo/db/vector_clock_mongod.cpp
+++ b/src/mongo/db/vector_clock_mongod.cpp
@@ -330,7 +330,7 @@ Future<void> VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser
NamespaceString::kVectorClockNamespace);
store.forEach(
opCtx,
- QUERY(VectorClockDocument::k_idFieldName << durableVectorClock.get_id()),
+ BSON(VectorClockDocument::k_idFieldName << durableVectorClock.get_id()),
[&, numDocsFound = 0](const auto& doc) mutable {
invariant(++numDocsFound == 1);
durableVectorClock = doc;
@@ -348,7 +348,7 @@ Future<void> VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser
PersistentTaskStore<VectorClockDocument> store(NamespaceString::kVectorClockNamespace);
store.upsert(opCtx,
- QUERY(VectorClockDocument::k_idFieldName << vcd.get_id()),
+ BSON(VectorClockDocument::k_idFieldName << vcd.get_id()),
vcd.toBSON(),
WriteConcerns::kMajorityWriteConcern);
return vectorTime;
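Taken together, every helper in this diff lands on the same convention: filter as BSONObj, settings as an optional trailing Query. A closing sketch of the upsert shape from the final hunk (types and write concern as above; opCtx assumed in scope):

    PersistentTaskStore<VectorClockDocument> store(NamespaceString::kVectorClockNamespace);
    store.upsert(opCtx,
                 BSON(VectorClockDocument::k_idFieldName << vcd.get_id()),  // match key
                 vcd.toBSON(),                                              // replacement document
                 WriteConcerns::kMajorityWriteConcern);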