summary | refs | log | tree | commit | diff
path: root/src
diff options
context:
space:
mode:
author: David Storch <david.storch@mongodb.com> 2022-05-03 23:17:45 +0000
committer: Evergreen Agent <no-reply@evergreen.mongodb.com> 2022-05-04 00:08:29 +0000
commit: f462237ac17a9c8a3e4a5a3fb6bbe6a966d4be85 (patch)
tree: 6e37a46ffd65e3a18c300bfe8672909a7b43fcb6 /src
parent: c9260ac7135fc183f2a785124d033debc63e2734 (diff)
download: mongo-f462237ac17a9c8a3e4a5a3fb6bbe6a966d4be85.tar.gz
SERVER-64315 Re-enable caching of SBE plans when there is a single query solution
This reverts commit f8589f840c8fee60abc482d2d2c41979e356922a.
Diffstat (limited to 'src')
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_d.cpp            |  2
-rw-r--r--  src/mongo/db/dbhelpers.cpp                                      | 22
-rw-r--r--  src/mongo/db/dbhelpers.h                                        | 24
-rw-r--r--  src/mongo/db/index_build_entry_helpers.cpp                      |  7
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup.cpp                |  1
-rw-r--r--  src/mongo/db/pipeline/expression_context.cpp                    |  2
-rw-r--r--  src/mongo/db/pipeline/expression_context.h                      |  3
-rw-r--r--  src/mongo/db/query/classic_plan_cache.cpp                       | 10
-rw-r--r--  src/mongo/db/query/get_executor.cpp                             |  9
-rw-r--r--  src/mongo/db/query/sbe_sub_planner.cpp                          |  3
-rw-r--r--  src/mongo/db/repl/oplog.cpp                                     |  2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp                               |  5
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp                    |  2
-rw-r--r--  src/mongo/db/repl/storage_timestamp_test.cpp                    |  3
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp            |  6
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp  |  7
-rw-r--r--  src/mongo/db/repl/tenant_oplog_applier.cpp                      |  4
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_copy_util.cpp         |  3
-rw-r--r--  src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp|  2
-rw-r--r--  src/mongo/db/serverless/shard_split_donor_service_test.cpp      |  3
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp                     |  6
-rw-r--r--  src/mongo/dbtests/querytests.cpp                                | 48
22 files changed, 57 insertions, 117 deletions
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 2f0618a200b..b602cc1b963 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -95,7 +95,7 @@ bool AuthzManagerExternalStateMongod::hasOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query) {
AutoGetCollectionForReadCommandMaybeLockFree ctx(opCtx, collectionName);
- return !Helpers::findOne(opCtx, ctx.getCollection(), query, false).isNull();
+ return !Helpers::findOne(opCtx, ctx.getCollection(), query).isNull();
}
namespace {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 8dd89b032dd..64412e5a2fd 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -59,15 +59,11 @@ using std::set;
using std::string;
using std::unique_ptr;
-/* fetch a single object from collection ns that matches query
- set your db SavedContext first
-*/
bool Helpers::findOne(OperationContext* opCtx,
const CollectionPtr& collection,
const BSONObj& query,
- BSONObj& result,
- bool requireIndex) {
- RecordId loc = findOne(opCtx, collection, query, requireIndex);
+ BSONObj& result) {
+ RecordId loc = findOne(opCtx, collection, query);
if (loc.isNull())
return false;
result = collection->docFor(opCtx, loc).value();
@@ -79,8 +75,7 @@ BSONObj Helpers::findOneForTesting(OperationContext* opCtx,
const BSONObj& query,
const bool invariantOnError) {
BSONObj ret;
- const bool requiresIndex = true;
- bool found = findOne(opCtx, collection, query, ret, requiresIndex);
+ bool found = findOne(opCtx, collection, query, ret);
if (invariantOnError) {
invariant(found);
}
@@ -94,20 +89,18 @@ BSONObj Helpers::findOneForTesting(OperationContext* opCtx,
*/
RecordId Helpers::findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- const BSONObj& query,
- bool requireIndex) {
+ const BSONObj& query) {
if (!collection)
return RecordId();
auto findCommand = std::make_unique<FindCommandRequest>(collection->ns());
findCommand->setFilter(query);
- return findOne(opCtx, collection, std::move(findCommand), requireIndex);
+ return findOne(opCtx, collection, std::move(findCommand));
}
RecordId Helpers::findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- std::unique_ptr<FindCommandRequest> findCommand,
- bool requireIndex) {
+ std::unique_ptr<FindCommandRequest> findCommand) {
if (!collection)
return RecordId();
@@ -125,13 +118,12 @@ RecordId Helpers::findOne(OperationContext* opCtx,
massertStatusOK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
auto exec = uassertStatusOK(getExecutor(opCtx,
&collection,
std::move(cq),
nullptr /* extractAndAttachPipelineStages */,
PlanYieldPolicy::YieldPolicy::NO_YIELD,
- options));
+ QueryPlannerParams::DEFAULT));
PlanExecutor::ExecState state;
BSONObj obj;
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 2115fa7a144..b975bceaf21 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -50,24 +50,20 @@ class FindCommandRequest;
struct Helpers {
/**
* Executes the given match expression ('query') and returns true if there is at least one
- * one matching document. The first found matching document is returned via the 'result' output
+ * matching document. The first found matching document is returned via the 'result' output
* parameter.
*
- * If 'requireIndex' is true, then this forces the query system to choose an indexed plan. An
- * exception is thrown if no 'requireIndex' is set to true but no indexed plan exists.
- *
* Performs the read successfully regardless of a replica set node's state, meaning that the
* node does not need to be primary or secondary.
*/
static bool findOne(OperationContext* opCtx,
const CollectionPtr& collection,
const BSONObj& query,
- BSONObj& result,
- bool requireIndex = false);
+ BSONObj& result);
/**
- * If `invariantOnError` is true, an error (e.g: no document found) will crash the
- * process. Otherwise the empty BSONObj will be returned.
+ * If `invariantOnError` is true, an error (e.g: no document found) will crash the process.
+ * Otherwise the empty BSONObj will be returned.
*/
static BSONObj findOneForTesting(OperationContext* opCtx,
const CollectionPtr& collection,
@@ -80,16 +76,16 @@ struct Helpers {
*/
static RecordId findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- const BSONObj& query,
- bool requireIndex);
+ const BSONObj& query);
static RecordId findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- std::unique_ptr<FindCommandRequest> qr,
- bool requireIndex);
+ std::unique_ptr<FindCommandRequest> qr);
/**
- * @param foundIndex if passed in will be set to 1 if ns and index found
- * @return true if object found
+ * If 'indexFound' is not nullptr, will be set to true if the query was answered using the _id
+ * index or using a clustered _id index.
+ *
+ * Returns true if a matching document was found.
*/
static bool findById(OperationContext* opCtx,
Database* db,
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index 317f3bb8839..1004ded65b4 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -305,11 +305,8 @@ StatusWith<IndexBuildEntry> getIndexBuildEntry(OperationContext* opCtx, UUID ind
// exceptions and we must protect it from unanticipated write conflicts from reads.
bool foundObj = writeConflictRetry(
opCtx, "getIndexBuildEntry", NamespaceString::kIndexBuildEntryNamespace.ns(), [&]() {
- return Helpers::findOne(opCtx,
- collection.getCollection(),
- BSON("_id" << indexBuildUUID),
- obj,
- /*requireIndex=*/true);
+ return Helpers::findOne(
+ opCtx, collection.getCollection(), BSON("_id" << indexBuildUUID), obj);
});
if (!foundObj) {
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index c76d57a596c..f6f53594e7e 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -156,6 +156,7 @@ DocumentSourceLookUp::DocumentSourceLookUp(
_resolvedPipeline = resolvedNamespace.pipeline;
_fromExpCtx = expCtx->copyForSubPipeline(resolvedNamespace.ns, resolvedNamespace.uuid);
+ _fromExpCtx->inLookup = true;
if (fromCollator) {
_fromExpCtx->setCollator(std::move(fromCollator.get()));
_hasExplicitCollation = true;
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index bb24bd630a9..7258456a1ac 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -225,6 +225,8 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
expCtx->originalAggregateCommand = originalAggregateCommand.getOwned();
+ expCtx->inLookup = inLookup;
+
// Note that we intentionally skip copying the value of '_interruptCounter' because 'expCtx' is
// intended to be used for executing a separate aggregation pipeline.
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 3e33cc5b6bf..c1f734b79b7 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -436,6 +436,9 @@ public:
// Tracks the depth of nested aggregation sub-pipelines. Used to enforce depth limits.
long long subPipelineDepth = 0;
+ // True if this 'ExpressionContext' object is for the inner side of a $lookup.
+ bool inLookup = false;
+
// If set, this will disallow use of features introduced in versions above the provided version.
boost::optional<multiversion::FeatureCompatibilityVersion> maxFeatureCompatibilityVersion;
diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp
index 31ac6d13550..5f33481fef6 100644
--- a/src/mongo/db/query/classic_plan_cache.cpp
+++ b/src/mongo/db/query/classic_plan_cache.cpp
@@ -150,12 +150,12 @@ bool shouldCacheQuery(const CanonicalQuery& query) {
// don't affect cache state, and it also makes sure that we can always generate information
// regarding rejected plans and/or trial period execution of candidate plans.
//
- // In order to be able to correctly measure 'executionTimeMillis' stats we should only skip
- // caching top-level plans. Otherwise, if we were to skip caching inner pipelines for $lookup
- // queries, we could run through the multi-planner for each document coming from the outer side,
- // completely skewing the 'executionTimeMillis' stats.
+ // There is one exception: $lookup's implementation in the DocumentSource engine relies on
+ // caching the plan on the inner side in order to avoid repeating the planning process for every
+ // document on the outer side. To ensure that the 'executionTimeMillis' value is accurate for
+ // $lookup, we allow the inner side to use the cache even if the query is an explain.
tassert(6497600, "expCtx is null", query.getExpCtxRaw());
- if (query.getExplain() && query.getExpCtxRaw()->subPipelineDepth == 0) {
+ if (query.getExplain() && !query.getExpCtxRaw()->inLookup) {
return false;
}
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index f69ee7e93d0..aa5ac8cc6b6 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1342,11 +1342,10 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
roots[0] = helper.buildExecutableTree(*(solutions[0]));
}
auto&& [root, data] = roots[0];
- // TODO SERVER-64315: re-enable caching of single solution plans
- // if (!planningResult->recoveredPinnedCacheEntry()) {
- // plan_cache_util::updatePlanCache(
- // opCtx, collections.getMainCollection(), *cq, *solutions[0], *root, data);
- // }
+ if (!planningResult->recoveredPinnedCacheEntry()) {
+ plan_cache_util::updatePlanCache(
+ opCtx, collections.getMainCollection(), *cq, *solutions[0], *root, data);
+ }
// Prepare the SBE tree for execution.
stage_builder::prepareSlotBasedExecutableTree(
diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp
index 377c65d2d3e..e5e714ad3aa 100644
--- a/src/mongo/db/query/sbe_sub_planner.cpp
+++ b/src/mongo/db/query/sbe_sub_planner.cpp
@@ -117,8 +117,7 @@ CandidatePlans SubPlanner::plan(
// TODO SERVER-61507: do it unconditionally when $group pushdown is integrated with the SBE plan
// cache.
if (_cq.pipeline().empty()) {
- // TODO SERVER-64315: re-enable caching of single solution plans
- // plan_cache_util::updatePlanCache(_opCtx, mainColl, _cq, *compositeSolution, *root, data);
+ plan_cache_util::updatePlanCache(_opCtx, mainColl, _cq, *compositeSolution, *root, data);
}
return {makeVector(plan_ranker::CandidatePlan{
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index b5c85cb52d4..0bb880a6aea 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1589,7 +1589,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
Helpers::findById(opCtx, collection, updateCriteria).isNull()) ||
// capped collections won't have an _id index
(!indexCatalog->haveIdIndex(opCtx) &&
- Helpers::findOne(opCtx, collection, updateCriteria, false).isNull())) {
+ Helpers::findOne(opCtx, collection, updateCriteria).isNull())) {
static constexpr char msg[] = "Couldn't find document";
LOGV2_ERROR(21259, msg, "op"_attr = redact(op.toBSONForLogging()));
return Status(ErrorCodes::UpdateOperationFailed,
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index 677d5a70f03..b409cae6977 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -1747,7 +1747,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (collection && removeSaver) {
BSONObj obj;
- bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj, false);
+ bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj);
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
@@ -1798,8 +1798,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const auto clock = opCtx->getServiceContext()->getFastClockSource();
const auto findOneStart = clock->now();
- RecordId loc =
- Helpers::findOne(opCtx, collection.get(), pattern, false);
+ RecordId loc = Helpers::findOne(opCtx, collection.get(), pattern);
if (clock->now() - findOneStart > Milliseconds(200))
LOGV2_WARNING(
21726,
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index 4b7e2f97df0..f38d94926f8 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1450,7 +1450,7 @@ Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
CollectionPtr usersCollection =
catalog->lookupCollectionByNamespace(opCtx, AuthorizationManager::usersCollectionNamespace);
const bool hasUsers =
- usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj(), false).isNull();
+ usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj()).isNull();
CollectionPtr adminVersionCollection = catalog->lookupCollectionByNamespace(
opCtx, AuthorizationManager::versionCollectionNamespace);
BSONObj authSchemaVersionDocument;
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index 207dc87dcb7..023dfa8bf92 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -2754,8 +2754,7 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary)
// to the side writes table and must be drained.
Helpers::upsert(_opCtx, collection->ns().ns(), BSON("_id" << 0 << "a" << 1 << "b" << 1));
{
- RecordId badRecord =
- Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1), false /* requireIndex */);
+ RecordId badRecord = Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1));
WriteUnitOfWork wuow(_opCtx);
collection->deleteDocument(_opCtx, kUninitializedStmtId, badRecord, nullptr);
wuow.commit();
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index 5b7ae3a98c3..2416406958f 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -544,10 +544,8 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateState
opCtx, "TenantMigrationDonorUpdateStateDoc", _stateDocumentsNS.ns(), [&] {
WriteUnitOfWork wuow(opCtx);
- const auto originalRecordId = Helpers::findOne(opCtx,
- collection.getCollection(),
- originalStateDocBson,
- false /* requireIndex */);
+ const auto originalRecordId = Helpers::findOne(
+ opCtx, collection.getCollection(), originalStateDocBson);
const auto originalSnapshot = Snapshotted<BSONObj>(
opCtx->recoveryUnit()->getSnapshotId(), originalStateDocBson);
invariant(!originalRecordId.isNull());
diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
index b8e4f683d58..bc798479097 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
@@ -147,11 +147,8 @@ StatusWith<TenantMigrationRecipientDocument> getStateDoc(OperationContext* opCtx
}
BSONObj result;
- auto foundDoc = Helpers::findOne(opCtx,
- collection.getCollection(),
- BSON("_id" << migrationUUID),
- result,
- /*requireIndex=*/true);
+ auto foundDoc =
+ Helpers::findOne(opCtx, collection.getCollection(), BSON("_id" << migrationUUID), result);
if (!foundDoc) {
return Status(ErrorCodes::NoMatchingDocument,
str::stream() << "No matching state doc found with tenant migration UUID: "
diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp
index 14aa00fafd2..ed6a035e244 100644
--- a/src/mongo/db/repl/tenant_oplog_applier.cpp
+++ b/src/mongo/db/repl/tenant_oplog_applier.cpp
@@ -983,9 +983,7 @@ Status TenantOplogApplier::_applyOplogEntryOrGroupedInserts(
// During tenant migration oplog application, we only need to apply createIndex on empty
// collections. Otherwise, the index is guaranteed to be dropped after. This is because
// we block index builds on the donor for the duration of the tenant migration.
- if (!Helpers::findOne(
- opCtx, autoColl.getCollection(), BSONObj(), false /* requireIndex */)
- .isNull()) {
+ if (!Helpers::findOne(opCtx, autoColl.getCollection(), BSONObj()).isNull()) {
LOGV2_DEBUG(5652701,
2,
"Tenant migration ignoring createIndex for non-empty collection",
diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
index 80b86bce735..4bcfbacd782 100644
--- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
@@ -187,8 +187,7 @@ boost::optional<Document> findDocWithHighestInsertedId(OperationContext* opCtx,
findCommand->setLimit(1);
findCommand->setSort(BSON("_id" << -1));
- auto recordId =
- Helpers::findOne(opCtx, collection, std::move(findCommand), true /* requireIndex */);
+ auto recordId = Helpers::findOne(opCtx, collection, std::move(findCommand));
if (recordId.isNull()) {
return boost::none;
}
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 10a777709f0..fb3925ea159 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -269,7 +269,7 @@ protected:
const ReshardingEnv& env) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
- RecordId rid = Helpers::findOne(opCtx, coll.getCollection(), query, false);
+ RecordId rid = Helpers::findOne(opCtx, coll.getCollection(), query);
ASSERT(!rid.isNull());
WriteUnitOfWork wuow(opCtx);
diff --git a/src/mongo/db/serverless/shard_split_donor_service_test.cpp b/src/mongo/db/serverless/shard_split_donor_service_test.cpp
index 403ad75d113..2d44ded9b7a 100644
--- a/src/mongo/db/serverless/shard_split_donor_service_test.cpp
+++ b/src/mongo/db/serverless/shard_split_donor_service_test.cpp
@@ -98,8 +98,7 @@ StatusWith<ShardSplitDonorDocument> getStateDocument(OperationContext* opCtx,
auto foundDoc = Helpers::findOne(opCtx,
collection.getCollection(),
BSON(ShardSplitDonorDocument::kIdFieldName << shardSplitId),
- result,
- true);
+ result);
if (!foundDoc) {
return Status(ErrorCodes::NoMatchingDocument,
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index 93b2bc277d5..936fcd64506 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -237,8 +237,7 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, systemViewsNs);
invariant(systemViews);
- const bool requireIndex = false;
- RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);
+ RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()));
Snapshotted<BSONObj> oldView;
if (!id.isValid() || !systemViews->findDoc(opCtx, id, &oldView)) {
@@ -270,8 +269,7 @@ void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceStri
if (!systemViews)
return;
- const bool requireIndex = false;
- RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);
+ RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()));
if (!id.isValid())
return;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 576a791a1bf..070767fa8cc 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -188,45 +188,12 @@ public:
BSONObj query = fromjson("{$or:[{b:2},{c:3}]}");
BSONObj ret;
// Check findOne() returning object.
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret));
ASSERT_EQUALS(string("b"), ret.firstElement().fieldName());
// Cross check with findOne() returning location.
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true))
- .value());
- }
-};
-
-class FindOneRequireIndex : public Base {
-public:
- void run() {
- insert(BSON("b" << 2 << "_id" << 0));
- BSONObj query = fromjson("{b:2}");
- BSONObj ret;
-
- // Check findOne() returning object, allowing unindexed scan.
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false));
- // Check findOne() returning location, allowing unindexed scan.
- ASSERT_BSONOBJ_EQ(
- ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false))
- .value());
-
- // Check findOne() returning object, requiring indexed scan without index.
- ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, ret, true), AssertionException);
- // Check findOne() returning location, requiring indexed scan without index.
- ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, true), AssertionException);
-
- addIndex(IndexSpec().addKey("b").unique(false));
-
- // Check findOne() returning object, requiring indexed scan with index.
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true));
- // Check findOne() returning location, requiring indexed scan with index.
- ASSERT_BSONOBJ_EQ(
- ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true))
- .value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query)).value());
}
};
@@ -262,12 +229,11 @@ public:
insert(BSONObj());
BSONObj query;
BSONObj ret;
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret));
ASSERT(ret.isEmpty());
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false))
- .value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query)).value());
}
};
@@ -1432,7 +1398,7 @@ public:
ASSERT_EQUALS(50, count());
BSONObj res;
- ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res));
ASSERT_EQUALS(40, res["x"].numberInt());
ASSERT(Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 20), res));
@@ -1447,8 +1413,7 @@ public:
{
Timer t;
for (int i = 0; i < n; i++) {
- ASSERT(
- Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res));
}
slow = t.micros();
}
@@ -1936,7 +1901,6 @@ public:
void setupTests() {
add<FindingStart>();
add<FindOneOr>();
- add<FindOneRequireIndex>();
add<FindOneEmptyObj>();
add<BoundedKey>();
add<GetMore>();