author    David Storch <david.storch@mongodb.com>    2022-05-03 23:17:45 +0000
committer Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-05-04 18:11:43 +0000
commit    a616699c511fb395f3c0c6f53e705b306927a687 (patch)
tree      cc6a204210c02c149505bd5f744b1b4433135300
parent    3c0df34e8a6a4c8f68428c2fbfb965f0b2e42e64 (diff)
download  mongo-a616699c511fb395f3c0c6f53e705b306927a687.tar.gz
SERVER-64315 Re-enable caching of SBE plans when there is a single query solution
This reverts commit f8589f840c8fee60abc482d2d2c41979e356922a.

(cherry picked from commit f462237ac17a9c8a3e4a5a3fb6bbe6a966d4be85)
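With this change, a query that has exactly one candidate plan is written to the SBE plan cache on first execution as a pinned, active entry, instead of being skipped. A minimal shell sketch of the observable behavior (collection and field names are illustrative; assumes SBE and featureFlagSbePlanCache are enabled):

const coll = db.single_solution_example;  // illustrative name
coll.drop();
assert.commandWorked(coll.insert({a: 1}));
// A collection scan is the only candidate plan for this filter, so there is
// exactly one query solution.
assert.eq(1, coll.find({a: 1}).itcount());
// One execution suffices: the entry is created pinned and active, with no
// multi-planning trial period.
const entries = coll.aggregate([{$planCacheStats: {}}]).toArray();
assert.eq(1, entries.length, entries);
assert.eq(entries[0].version, "2", entries);  // "2" marks an SBE cache entry
assert.eq(entries[0].isActive, true, entries);
assert.eq(entries[0].isPinned, true, entries);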
-rw-r--r--  etc/backports_required_for_multiversion_tests.yml | 16
-rw-r--r--  jstests/core/plan_cache_sbe.js | 5
-rw-r--r--  jstests/core/sbe/plan_cache_sbe_with_or_queries.js | 6
-rw-r--r--  jstests/core/sbe_plan_cache_autoparameterize_collscan.js (renamed from jstests/noPassthrough/sbe_plan_cache_autoparameterize_collscan.js) | 41
-rw-r--r--  jstests/core/wildcard_index_cached_plans.js | 9
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js | 9
-rw-r--r--  src/mongo/db/auth/authz_manager_external_state_d.cpp | 2
-rw-r--r--  src/mongo/db/dbhelpers.cpp | 22
-rw-r--r--  src/mongo/db/dbhelpers.h | 24
-rw-r--r--  src/mongo/db/index_build_entry_helpers.cpp | 7
-rw-r--r--  src/mongo/db/pipeline/document_source_lookup.cpp | 1
-rw-r--r--  src/mongo/db/pipeline/expression_context.cpp | 2
-rw-r--r--  src/mongo/db/pipeline/expression_context.h | 3
-rw-r--r--  src/mongo/db/query/classic_plan_cache.cpp | 10
-rw-r--r--  src/mongo/db/query/get_executor.cpp | 9
-rw-r--r--  src/mongo/db/query/sbe_sub_planner.cpp | 3
-rw-r--r--  src/mongo/db/repl/oplog.cpp | 2
-rw-r--r--  src/mongo/db/repl/rs_rollback.cpp | 5
-rw-r--r--  src/mongo/db/repl/storage_interface_impl.cpp | 2
-rw-r--r--  src/mongo/db/repl/storage_timestamp_test.cpp | 3
-rw-r--r--  src/mongo/db/repl/tenant_migration_donor_service.cpp | 6
-rw-r--r--  src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp | 7
-rw-r--r--  src/mongo/db/repl/tenant_oplog_applier.cpp | 4
-rw-r--r--  src/mongo/db/s/resharding/resharding_data_copy_util.cpp | 3
-rw-r--r--  src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp | 2
-rw-r--r--  src/mongo/db/serverless/shard_split_utils.cpp | 3
-rw-r--r--  src/mongo/db/views/durable_view_catalog.cpp | 6
-rw-r--r--  src/mongo/dbtests/querytests.cpp | 48
28 files changed, 99 insertions(+), 161 deletions(-)
diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml
index 32348914edb..7cdf30437ca 100644
--- a/etc/backports_required_for_multiversion_tests.yml
+++ b/etc/backports_required_for_multiversion_tests.yml
@@ -206,6 +206,14 @@ last-continuous:
test_file: jstests/replsets/capped_deletes.js
- ticket: SERVER-65773
test_file: jstests/aggregation/agg_infinite_recursion.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/plan_cache_sbe.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/sbe/plan_cache_sbe_with_or_queries.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/sbe_plan_cache_autoparameterize_collscan.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/wildcard_index_cached_plans.js
# Tests that should only be excluded from particular suites should be listed under that suite.
suites:
@@ -547,6 +555,14 @@ last-lts:
test_file: jstests/replsets/capped_deletes.js
- ticket: SERVER-65773
test_file: jstests/aggregation/agg_infinite_recursion.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/plan_cache_sbe.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/sbe/plan_cache_sbe_with_or_queries.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/sbe_plan_cache_autoparameterize_collscan.js
+ - ticket: SERVER-64315
+ test_file: jstests/core/wildcard_index_cached_plans.js
# Tests that should only be excluded from particular suites should be listed under that suite.
suites:
diff --git a/jstests/core/plan_cache_sbe.js b/jstests/core/plan_cache_sbe.js
index 4dcb69b036a..dfed346f5ee 100644
--- a/jstests/core/plan_cache_sbe.js
+++ b/jstests/core/plan_cache_sbe.js
@@ -12,7 +12,7 @@
* assumes_read_concern_unchanged,
* assumes_read_preference_unchanged,
* assumes_unsharded_collection,
- * # Single solution plans can be cached in SBE plan cache after V6.0.
+ * # The SBE plan cache was introduced in 6.0.
* requires_fcv_60,
* ]
*/
@@ -29,8 +29,7 @@ const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
assert.commandWorked(coll.insert({a: 1, b: 1}));
// Check that a new entry is added to the plan cache even for single plans.
-// TODO SERVER-64315: re-enable the test for single solution plans
-if (false /*isSbePlanCacheEnabled*/) {
+if (isSbePlanCacheEnabled) {
assert.eq(1, coll.find({a: 1}).itcount());
// Validate sbe plan cache stats entry.
const allStats = coll.aggregate([{$planCacheStats: {}}]).toArray();
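The test above reads the entire $planCacheStats output. When more than one shape is cached, the entry for a single shape can be isolated by its plan cache key, as other tests in this patch do; a sketch assuming the getPlanCacheKeyFromShape helper from jstests/libs/analyze_plan.js:

load("jstests/libs/analyze_plan.js");
const planCacheKey = getPlanCacheKeyFromShape({query: {a: 1}, collection: coll, db: db});
// Narrow the stats output to the one entry for this query shape.
const stats =
    coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey}}]).toArray();
assert.eq(1, stats.length, stats);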
diff --git a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
index 6f46233fd95..19b66ee05cf 100644
--- a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
+++ b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
@@ -22,12 +22,6 @@ if (!isSBEEnabled) {
return;
}
-// TODO SERVER-64315: re-enable this test
-if (true) {
- jsTest.log("This test is temporary disabled");
- return;
-}
-
function getPlanCacheEntries(query, collection, db) {
const planCacheKey = getPlanCacheKeyFromShape({query, collection, db});
return coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey}}]).toArray();
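This test drives the SBE subplanner: each branch of a rooted $or is planned independently, and the stitched-together composite solution is a single plan, which the sbe_sub_planner.cpp change below makes cacheable again. A minimal sketch of the scenario, using the getPlanCacheEntries helper shown above (field names illustrative):

assert.commandWorked(coll.createIndexes([{a: 1}, {b: 1}]));
// Each $or branch is answered by its own index; the composite plan is the
// lone solution and should now produce a plan cache entry.
assert.eq(0, coll.find({$or: [{a: 1}, {b: 1}]}).itcount());
const entries = getPlanCacheEntries({$or: [{a: 1}, {b: 1}]}, coll, db);
assert.eq(1, entries.length, entries);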
diff --git a/jstests/noPassthrough/sbe_plan_cache_autoparameterize_collscan.js b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
index 79361c2ae24..18fdf4305ca 100644
--- a/jstests/noPassthrough/sbe_plan_cache_autoparameterize_collscan.js
+++ b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
@@ -1,6 +1,15 @@
/**
* Tests that auto-parameterized collection scan plans are correctly stored in the SBE plan
* cache, and that they can be correctly recovered from the cache with new parameter values.
+ *
+ * @tags: [
+ * assumes_read_concern_unchanged,
+ * assumes_read_preference_unchanged,
+ * assumes_unsharded_collection,
+ * does_not_support_stepdowns,
+ * # The SBE plan cache was introduced in 6.0.
+ * requires_fcv_60,
+ * ]
*/
(function() {
"use strict";
@@ -8,33 +17,15 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js");
-// TODO SERVER-64315: re-enable this test. This test depends on caching single solution plans,
-// which is disabled temporarily due to a bug.
-//
-// As part of re-enabling the test, we should move it to jstests/core/ so that it can benefit from
-// passthrough testing. This test formerly needed to be in noPassthrough because it required a
-// special flag to enable auto-parameterization, but this is no longer the case.
-if (true) {
- jsTest.log("This test is temporarily disabled");
- return;
-}
-
-const conn = MongoRunner.runMongod();
-assert.neq(conn, null, "mongod failed to start up");
-
-const dbName = jsTestName();
-const db = conn.getDB(dbName);
-
// This test is specifically verifying the behavior of the SBE plan cache. So if either the SBE plan
// cache or SBE itself are disabled, bail out.
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
jsTestLog("Skipping test because either SBE engine or SBE plan cache is disabled");
- MongoRunner.stopMongod(conn);
return;
}
-assert.commandWorked(db.dropDatabase());
-const coll = db.coll;
+const coll = db.sbe_plan_cache_autoparameterize_collscan;
+coll.drop();
let data = [
{_id: 0, a: 1, c: "foo"},
@@ -352,7 +343,7 @@ runTest({query: {a: {$in: [1, 2]}}, projection: {_id: 1}},
runTest({query: {a: {$in: [1, 2]}}, projection: {_id: 1}},
[{_id: 0}, {_id: 1}],
{query: {a: {$in: [1, 2, /foo/]}}, projection: {_id: 1}},
- [{_id: 0}, {_id: 1}, {_id: 13}],
+ [{_id: 0}, {_id: 1}, {_id: 13}, {_id: 14}],
false);
// Adding a nested array to an $in inhibits auto-parameterization.
@@ -404,12 +395,14 @@ runTest({query: {a: /foo/}, projection: {_id: 1}},
[{_id: 15}],
true);
-// Test auto-parameterization of $type.
+// Test that $type is not auto-parameterized.
+//
+// TODO SERVER-64776: Re-enable auto-parameterization for $type predicates.
runTest({query: {a: {$type: "double"}}, projection: {_id: 1}},
[{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
{query: {a: {$type: ["string", "regex"]}}, projection: {_id: 1}},
[{_id: 13}, {_id: 14}, {_id: 15}],
- true);
+ false);
// Test that $type is not auto-parameterized when the type set includes "array".
runTest({query: {a: {$type: ["string", "regex"]}}, projection: {_id: 1}},
@@ -417,6 +410,4 @@ runTest({query: {a: {$type: ["string", "regex"]}}, projection: {_id: 1}},
{query: {a: {$type: ["string", "array"]}}, projection: {_id: 1}},
[{_id: 5}, {_id: 6}, {_id: 8}, {_id: 11}, {_id: 12}, {_id: 13}, {_id: 15}],
false);
-
-MongoRunner.stopMongod(conn);
}());
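Auto-parameterization is why re-enabling single-solution caching matters for this test: the constants are lifted out of the filter, so queries that differ only in their constant values normalize to the same parameterized plan and share a cache key. A sketch of that property (helper from jstests/libs/analyze_plan.js):

const keyForOne = getPlanCacheKeyFromShape({query: {a: 1}, collection: coll, db: db});
const keyForTwo = getPlanCacheKeyFromShape({query: {a: 2}, collection: coll, db: db});
// Same shape, different constants: one parameterized SBE plan serves both.
assert.eq(keyForOne, keyForTwo);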
diff --git a/jstests/core/wildcard_index_cached_plans.js b/jstests/core/wildcard_index_cached_plans.js
index d113f919362..7ca356726c1 100644
--- a/jstests/core/wildcard_index_cached_plans.js
+++ b/jstests/core/wildcard_index_cached_plans.js
@@ -124,12 +124,9 @@ assert.neq(getPlanCacheKeyFromShape({query: queryWithBNull, collection: coll, db
// SBE plan cache.
cacheEntry = getCacheEntryForQuery({a: 1, b: null});
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
- // TODO SERVER-64315: uncomment the following lines once we start caching single solution plans
- // again
- // assert.neq(cacheEntry, null);
- // assert.eq(cacheEntry.isActive, true, cacheEntry);
- // assert.eq(cacheEntry.isPinned, true, cacheEntry);
- assert.eq(cacheEntry, null);
+ assert.neq(cacheEntry, null);
+ assert.eq(cacheEntry.isActive, true, cacheEntry);
+ assert.eq(cacheEntry.isPinned, true, cacheEntry);
} else {
assert.eq(cacheEntry, null);
}
diff --git a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
index bf2d6fe3c3e..21ff98a1ae1 100644
--- a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
+++ b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
@@ -59,7 +59,7 @@ if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
assert.commandWorked(db.dropDatabase());
const coll = db.coll;
-assert.commandWorked(coll.createIndexes([{a: 1}, {a: 1, b: 1}]));
+assert.commandWorked(coll.createIndex({a: 1}));
const filter = {
a: 1,
@@ -68,16 +68,17 @@ const filter = {
const cacheKey = getPlanCacheKeyFromShape({query: filter, collection: coll, db: db});
function createCacheEntry() {
- // Run the query twice so that the cache entry gets activated.
- [...Array(2)].forEach(() => assert.eq(0, coll.find(filter).itcount()));
+ assert.eq(0, coll.find(filter).itcount());
const cacheContents =
coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: cacheKey}}]).toArray();
// We expect to see a single SBE cache entry.
assert.eq(cacheContents.length, 1, cacheContents);
const cacheEntry = cacheContents[0];
+ // Since there is just a single indexed plan available, we expect the cache entry to be pinned
+ // and active after running the query just once.
assert.eq(cacheEntry.version, "2", cacheContents);
assert.eq(cacheEntry.isActive, true, cacheContents);
- assert.eq(cacheEntry.isPinned, false, cacheContents);
+ assert.eq(cacheEntry.isPinned, true, cacheContents);
}
function assertCacheCleared() {
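The index change above is what justifies the new single-execution expectation: with only {a: 1}, the query has one solution and its entry is pinned immediately. Had both indexes been kept, the query would multi-plan and the entry would need a second run to become active; a sketch of that contrasting case (assumes the classic two-run activation scheme carried over to the SBE cache):

assert.commandWorked(coll.createIndexes([{a: 1}, {a: 1, b: 1}]));
assert.eq(0, coll.find(filter).itcount());  // first run: inactive trial entry
let entry = coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: cacheKey}}])
                .toArray()[0];
assert.eq(entry.isActive, false, entry);
assert.eq(0, coll.find(filter).itcount());  // second run: entry activated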
diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp
index 2f0618a200b..b602cc1b963 100644
--- a/src/mongo/db/auth/authz_manager_external_state_d.cpp
+++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp
@@ -95,7 +95,7 @@ bool AuthzManagerExternalStateMongod::hasOne(OperationContext* opCtx,
const NamespaceString& collectionName,
const BSONObj& query) {
AutoGetCollectionForReadCommandMaybeLockFree ctx(opCtx, collectionName);
- return !Helpers::findOne(opCtx, ctx.getCollection(), query, false).isNull();
+ return !Helpers::findOne(opCtx, ctx.getCollection(), query).isNull();
}
namespace {
diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp
index 8dd89b032dd..64412e5a2fd 100644
--- a/src/mongo/db/dbhelpers.cpp
+++ b/src/mongo/db/dbhelpers.cpp
@@ -59,15 +59,11 @@ using std::set;
using std::string;
using std::unique_ptr;
-/* fetch a single object from collection ns that matches query
- set your db SavedContext first
-*/
bool Helpers::findOne(OperationContext* opCtx,
const CollectionPtr& collection,
const BSONObj& query,
- BSONObj& result,
- bool requireIndex) {
- RecordId loc = findOne(opCtx, collection, query, requireIndex);
+ BSONObj& result) {
+ RecordId loc = findOne(opCtx, collection, query);
if (loc.isNull())
return false;
result = collection->docFor(opCtx, loc).value();
@@ -79,8 +75,7 @@ BSONObj Helpers::findOneForTesting(OperationContext* opCtx,
const BSONObj& query,
const bool invariantOnError) {
BSONObj ret;
- const bool requiresIndex = true;
- bool found = findOne(opCtx, collection, query, ret, requiresIndex);
+ bool found = findOne(opCtx, collection, query, ret);
if (invariantOnError) {
invariant(found);
}
@@ -94,20 +89,18 @@ BSONObj Helpers::findOneForTesting(OperationContext* opCtx,
*/
RecordId Helpers::findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- const BSONObj& query,
- bool requireIndex) {
+ const BSONObj& query) {
if (!collection)
return RecordId();
auto findCommand = std::make_unique<FindCommandRequest>(collection->ns());
findCommand->setFilter(query);
- return findOne(opCtx, collection, std::move(findCommand), requireIndex);
+ return findOne(opCtx, collection, std::move(findCommand));
}
RecordId Helpers::findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- std::unique_ptr<FindCommandRequest> findCommand,
- bool requireIndex) {
+ std::unique_ptr<FindCommandRequest> findCommand) {
if (!collection)
return RecordId();
@@ -125,13 +118,12 @@ RecordId Helpers::findOne(OperationContext* opCtx,
massertStatusOK(statusWithCQ.getStatus());
unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- size_t options = requireIndex ? QueryPlannerParams::NO_TABLE_SCAN : QueryPlannerParams::DEFAULT;
auto exec = uassertStatusOK(getExecutor(opCtx,
&collection,
std::move(cq),
nullptr /* extractAndAttachPipelineStages */,
PlanYieldPolicy::YieldPolicy::NO_YIELD,
- options));
+ QueryPlannerParams::DEFAULT));
PlanExecutor::ExecState state;
BSONObj obj;
diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h
index 2115fa7a144..b975bceaf21 100644
--- a/src/mongo/db/dbhelpers.h
+++ b/src/mongo/db/dbhelpers.h
@@ -50,24 +50,20 @@ class FindCommandRequest;
struct Helpers {
/**
* Executes the given match expression ('query') and returns true if there is at least one
- * one matching document. The first found matching document is returned via the 'result' output
+ * matching document. The first found matching document is returned via the 'result' output
* parameter.
*
- * If 'requireIndex' is true, then this forces the query system to choose an indexed plan. An
- * exception is thrown if no 'requireIndex' is set to true but no indexed plan exists.
- *
* Performs the read successfully regardless of a replica set node's state, meaning that the
* node does not need to be primary or secondary.
*/
static bool findOne(OperationContext* opCtx,
const CollectionPtr& collection,
const BSONObj& query,
- BSONObj& result,
- bool requireIndex = false);
+ BSONObj& result);
/**
- * If `invariantOnError` is true, an error (e.g: no document found) will crash the
- * process. Otherwise the empty BSONObj will be returned.
+ * If `invariantOnError` is true, an error (e.g: no document found) will crash the process.
+ * Otherwise the empty BSONObj will be returned.
*/
static BSONObj findOneForTesting(OperationContext* opCtx,
const CollectionPtr& collection,
@@ -80,16 +76,16 @@ struct Helpers {
*/
static RecordId findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- const BSONObj& query,
- bool requireIndex);
+ const BSONObj& query);
static RecordId findOne(OperationContext* opCtx,
const CollectionPtr& collection,
- std::unique_ptr<FindCommandRequest> qr,
- bool requireIndex);
+ std::unique_ptr<FindCommandRequest> qr);
/**
- * @param foundIndex if passed in will be set to 1 if ns and index found
- * @return true if object found
+ * If 'indexFound' is not nullptr, will be set to true if the query was answered using the _id
+ * index or using a clustered _id index.
+ *
+ * Returns true if a matching document was found.
*/
static bool findById(OperationContext* opCtx,
Database* db,
diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp
index abae656e504..a1e55b6b479 100644
--- a/src/mongo/db/index_build_entry_helpers.cpp
+++ b/src/mongo/db/index_build_entry_helpers.cpp
@@ -305,11 +305,8 @@ StatusWith<IndexBuildEntry> getIndexBuildEntry(OperationContext* opCtx, UUID ind
// exceptions and we must protect it from unanticipated write conflicts from reads.
bool foundObj = writeConflictRetry(
opCtx, "getIndexBuildEntry", NamespaceString::kIndexBuildEntryNamespace.ns(), [&]() {
- return Helpers::findOne(opCtx,
- collection.getCollection(),
- BSON("_id" << indexBuildUUID),
- obj,
- /*requireIndex=*/true);
+ return Helpers::findOne(
+ opCtx, collection.getCollection(), BSON("_id" << indexBuildUUID), obj);
});
if (!foundObj) {
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp
index c76d57a596c..f6f53594e7e 100644
--- a/src/mongo/db/pipeline/document_source_lookup.cpp
+++ b/src/mongo/db/pipeline/document_source_lookup.cpp
@@ -156,6 +156,7 @@ DocumentSourceLookUp::DocumentSourceLookUp(
_resolvedPipeline = resolvedNamespace.pipeline;
_fromExpCtx = expCtx->copyForSubPipeline(resolvedNamespace.ns, resolvedNamespace.uuid);
+ _fromExpCtx->inLookup = true;
if (fromCollator) {
_fromExpCtx->setCollator(std::move(fromCollator.get()));
_hasExplicitCollation = true;
diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp
index 55b67e3857a..2c41c612595 100644
--- a/src/mongo/db/pipeline/expression_context.cpp
+++ b/src/mongo/db/pipeline/expression_context.cpp
@@ -225,6 +225,8 @@ intrusive_ptr<ExpressionContext> ExpressionContext::copyWith(
expCtx->originalAggregateCommand = originalAggregateCommand.getOwned();
+ expCtx->inLookup = inLookup;
+
// Note that we intentionally skip copying the value of '_interruptCounter' because 'expCtx' is
// intended to be used for executing a separate aggregation pipeline.
diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h
index 8486aed7e98..3f83fba6c8e 100644
--- a/src/mongo/db/pipeline/expression_context.h
+++ b/src/mongo/db/pipeline/expression_context.h
@@ -433,6 +433,9 @@ public:
// Tracks the depth of nested aggregation sub-pipelines. Used to enforce depth limits.
long long subPipelineDepth = 0;
+ // True if this 'ExpressionContext' object is for the inner side of a $lookup.
+ bool inLookup = false;
+
// If set, this will disallow use of features introduced in versions above the provided version.
boost::optional<multiversion::FeatureCompatibilityVersion> maxFeatureCompatibilityVersion;
diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp
index 31ac6d13550..5f33481fef6 100644
--- a/src/mongo/db/query/classic_plan_cache.cpp
+++ b/src/mongo/db/query/classic_plan_cache.cpp
@@ -150,12 +150,12 @@ bool shouldCacheQuery(const CanonicalQuery& query) {
// don't affect cache state, and it also makes sure that we can always generate information
// regarding rejected plans and/or trial period execution of candidate plans.
//
- // In order to be able to correctly measure 'executionTimeMillis' stats we should only skip
- // caching top-level plans. Otherwise, if we were to skip caching inner pipelines for $lookup
- // queries, we could run through the multi-planner for each document coming from the outer side,
- // completely skewing the 'executionTimeMillis' stats.
+ // There is one exception: $lookup's implementation in the DocumentSource engine relies on
+ // caching the plan on the inner side in order to avoid repeating the planning process for every
+ // document on the outer side. To ensure that the 'executionTimeMillis' value is accurate for
+ // $lookup, we allow the inner side to use the cache even if the query is an explain.
tassert(6497600, "expCtx is null", query.getExpCtxRaw());
- if (query.getExplain() && query.getExpCtxRaw()->subPipelineDepth == 0) {
+ if (query.getExplain() && !query.getExpCtxRaw()->inLookup) {
return false;
}
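The user-visible effect of the new 'inLookup' flag: running explain on a $lookup pipeline no longer suppresses caching on the inner side, so the foreign collection's plan cache may be populated during the explain and 'executionTimeMillis' is not skewed by re-planning the inner side for every outer document. A shell sketch of what this permits (collection and field names illustrative):

const orders = db.orders, items = db.items;
// The top-level explained query itself is still never cached...
orders.explain("executionStats").aggregate([
    {$lookup: {from: "items", localField: "sku", foreignField: "sku", as: "matches"}}
]);
// ...but inner-side queries issued per outer document may now hit and
// populate the foreign collection's plan cache.
printjson(items.aggregate([{$planCacheStats: {}}]).toArray());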
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 3ea135da726..07b2e440e64 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1343,11 +1343,10 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
roots[0] = helper.buildExecutableTree(*(solutions[0]));
}
auto&& [root, data] = roots[0];
- // TODO SERVER-64315: re-enable caching of single solution plans
- // if (!planningResult->recoveredPinnedCacheEntry()) {
- // plan_cache_util::updatePlanCache(
- // opCtx, collections.getMainCollection(), *cq, *solutions[0], *root, data);
- // }
+ if (!planningResult->recoveredPinnedCacheEntry()) {
+ plan_cache_util::updatePlanCache(
+ opCtx, collections.getMainCollection(), *cq, *solutions[0], *root, data);
+ }
// Prepare the SBE tree for execution.
stage_builder::prepareSlotBasedExecutableTree(
diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp
index 377c65d2d3e..e5e714ad3aa 100644
--- a/src/mongo/db/query/sbe_sub_planner.cpp
+++ b/src/mongo/db/query/sbe_sub_planner.cpp
@@ -117,8 +117,7 @@ CandidatePlans SubPlanner::plan(
// TODO SERVER-61507: do it unconditionally when $group pushdown is integrated with the SBE plan
// cache.
if (_cq.pipeline().empty()) {
- // TODO SERVER-64315: re-enable caching of single solution plans
- // plan_cache_util::updatePlanCache(_opCtx, mainColl, _cq, *compositeSolution, *root, data);
+ plan_cache_util::updatePlanCache(_opCtx, mainColl, _cq, *compositeSolution, *root, data);
}
return {makeVector(plan_ranker::CandidatePlan{
diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp
index b4a1d1e88d4..0ea5095efee 100644
--- a/src/mongo/db/repl/oplog.cpp
+++ b/src/mongo/db/repl/oplog.cpp
@@ -1589,7 +1589,7 @@ Status applyOperation_inlock(OperationContext* opCtx,
Helpers::findById(opCtx, collection, updateCriteria).isNull()) ||
// capped collections won't have an _id index
(!indexCatalog->haveIdIndex(opCtx) &&
- Helpers::findOne(opCtx, collection, updateCriteria, false).isNull())) {
+ Helpers::findOne(opCtx, collection, updateCriteria).isNull())) {
static constexpr char msg[] = "Couldn't find document";
LOGV2_ERROR(21259, msg, "op"_attr = redact(op.toBSONForLogging()));
return Status(ErrorCodes::UpdateOperationFailed,
diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp
index d3310b3048c..35951278ee1 100644
--- a/src/mongo/db/repl/rs_rollback.cpp
+++ b/src/mongo/db/repl/rs_rollback.cpp
@@ -1747,7 +1747,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
if (collection && removeSaver) {
BSONObj obj;
- bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj, false);
+ bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj);
if (found) {
auto status = removeSaver->goingToDelete(obj);
if (!status.isOK()) {
@@ -1798,8 +1798,7 @@ void rollback_internal::syncFixUp(OperationContext* opCtx,
const auto clock = opCtx->getServiceContext()->getFastClockSource();
const auto findOneStart = clock->now();
- RecordId loc =
- Helpers::findOne(opCtx, collection.get(), pattern, false);
+ RecordId loc = Helpers::findOne(opCtx, collection.get(), pattern);
if (clock->now() - findOneStart > Milliseconds(200))
LOGV2_WARNING(
21726,
diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp
index db56bcceff1..2811b445834 100644
--- a/src/mongo/db/repl/storage_interface_impl.cpp
+++ b/src/mongo/db/repl/storage_interface_impl.cpp
@@ -1450,7 +1450,7 @@ Status StorageInterfaceImpl::isAdminDbValid(OperationContext* opCtx) {
CollectionPtr usersCollection =
catalog->lookupCollectionByNamespace(opCtx, AuthorizationManager::usersCollectionNamespace);
const bool hasUsers =
- usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj(), false).isNull();
+ usersCollection && !Helpers::findOne(opCtx, usersCollection, BSONObj()).isNull();
CollectionPtr adminVersionCollection = catalog->lookupCollectionByNamespace(
opCtx, AuthorizationManager::versionCollectionNamespace);
BSONObj authSchemaVersionDocument;
diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp
index fead4dab5ee..7919675a946 100644
--- a/src/mongo/db/repl/storage_timestamp_test.cpp
+++ b/src/mongo/db/repl/storage_timestamp_test.cpp
@@ -2754,8 +2754,7 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary)
// to the side writes table and must be drained.
Helpers::upsert(_opCtx, collection->ns().ns(), BSON("_id" << 0 << "a" << 1 << "b" << 1));
{
- RecordId badRecord =
- Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1), false /* requireIndex */);
+ RecordId badRecord = Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1));
WriteUnitOfWork wuow(_opCtx);
collection->deleteDocument(_opCtx, kUninitializedStmtId, badRecord, nullptr);
wuow.commit();
diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp
index dbbd56c654f..e7912da9577 100644
--- a/src/mongo/db/repl/tenant_migration_donor_service.cpp
+++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp
@@ -544,10 +544,8 @@ ExecutorFuture<repl::OpTime> TenantMigrationDonorService::Instance::_updateState
opCtx, "TenantMigrationDonorUpdateStateDoc", _stateDocumentsNS.ns(), [&] {
WriteUnitOfWork wuow(opCtx);
- const auto originalRecordId = Helpers::findOne(opCtx,
- collection.getCollection(),
- originalStateDocBson,
- false /* requireIndex */);
+ const auto originalRecordId = Helpers::findOne(
+ opCtx, collection.getCollection(), originalStateDocBson);
const auto originalSnapshot = Snapshotted<BSONObj>(
opCtx->recoveryUnit()->getSnapshotId(), originalStateDocBson);
invariant(!originalRecordId.isNull());
diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
index c3e17f2e27f..ce994c1581e 100644
--- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
+++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp
@@ -147,11 +147,8 @@ StatusWith<TenantMigrationRecipientDocument> getStateDoc(OperationContext* opCtx
}
BSONObj result;
- auto foundDoc = Helpers::findOne(opCtx,
- collection.getCollection(),
- BSON("_id" << migrationUUID),
- result,
- /*requireIndex=*/true);
+ auto foundDoc =
+ Helpers::findOne(opCtx, collection.getCollection(), BSON("_id" << migrationUUID), result);
if (!foundDoc) {
return Status(ErrorCodes::NoMatchingDocument,
str::stream() << "No matching state doc found with tenant migration UUID: "
diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp
index 8a76189a52c..38eb4edfd71 100644
--- a/src/mongo/db/repl/tenant_oplog_applier.cpp
+++ b/src/mongo/db/repl/tenant_oplog_applier.cpp
@@ -983,9 +983,7 @@ Status TenantOplogApplier::_applyOplogEntryOrGroupedInserts(
// During tenant migration oplog application, we only need to apply createIndex on empty
// collections. Otherwise, the index is guaranteed to be dropped after. This is because
// we block index builds on the donor for the duration of the tenant migration.
- if (!Helpers::findOne(
- opCtx, autoColl.getCollection(), BSONObj(), false /* requireIndex */)
- .isNull()) {
+ if (!Helpers::findOne(opCtx, autoColl.getCollection(), BSONObj()).isNull()) {
LOGV2_DEBUG(5652701,
2,
"Tenant migration ignoring createIndex for non-empty collection",
diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
index 8be9fbb4669..12efd935005 100644
--- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp
@@ -187,8 +187,7 @@ boost::optional<Document> findDocWithHighestInsertedId(OperationContext* opCtx,
findCommand->setLimit(1);
findCommand->setSort(BSON("_id" << -1));
- auto recordId =
- Helpers::findOne(opCtx, collection, std::move(findCommand), true /* requireIndex */);
+ auto recordId = Helpers::findOne(opCtx, collection, std::move(findCommand));
if (recordId.isNull()) {
return boost::none;
}
diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
index 10a777709f0..fb3925ea159 100644
--- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp
@@ -269,7 +269,7 @@ protected:
const ReshardingEnv& env) {
AutoGetCollection coll(opCtx, nss, MODE_IX);
- RecordId rid = Helpers::findOne(opCtx, coll.getCollection(), query, false);
+ RecordId rid = Helpers::findOne(opCtx, coll.getCollection(), query);
ASSERT(!rid.isNull());
WriteUnitOfWork wuow(opCtx);
diff --git a/src/mongo/db/serverless/shard_split_utils.cpp b/src/mongo/db/serverless/shard_split_utils.cpp
index 7ed34524b43..63cb35a161c 100644
--- a/src/mongo/db/serverless/shard_split_utils.cpp
+++ b/src/mongo/db/serverless/shard_split_utils.cpp
@@ -195,8 +195,7 @@ StatusWith<ShardSplitDonorDocument> getStateDocument(OperationContext* opCtx,
auto foundDoc = Helpers::findOne(opCtx,
collection.getCollection(),
BSON(ShardSplitDonorDocument::kIdFieldName << shardSplitId),
- result,
- true);
+ result);
if (!foundDoc) {
return Status(ErrorCodes::NoMatchingDocument,
diff --git a/src/mongo/db/views/durable_view_catalog.cpp b/src/mongo/db/views/durable_view_catalog.cpp
index bbc2d1f59ef..c96b7af6672 100644
--- a/src/mongo/db/views/durable_view_catalog.cpp
+++ b/src/mongo/db/views/durable_view_catalog.cpp
@@ -198,8 +198,7 @@ void DurableViewCatalogImpl::upsert(OperationContext* opCtx,
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, systemViewsNs);
invariant(systemViews);
- const bool requireIndex = false;
- RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);
+ RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()));
Snapshotted<BSONObj> oldView;
if (!id.isValid() || !systemViews->findDoc(opCtx, id, &oldView)) {
@@ -231,8 +230,7 @@ void DurableViewCatalogImpl::remove(OperationContext* opCtx, const NamespaceStri
if (!systemViews)
return;
- const bool requireIndex = false;
- RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()), requireIndex);
+ RecordId id = Helpers::findOne(opCtx, systemViews, BSON("_id" << name.ns()));
if (!id.isValid())
return;
diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp
index 1309ecedc8c..f7aefecaf55 100644
--- a/src/mongo/dbtests/querytests.cpp
+++ b/src/mongo/dbtests/querytests.cpp
@@ -188,45 +188,12 @@ public:
BSONObj query = fromjson("{$or:[{b:2},{c:3}]}");
BSONObj ret;
// Check findOne() returning object.
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret));
ASSERT_EQUALS(string("b"), ret.firstElement().fieldName());
// Cross check with findOne() returning location.
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true))
- .value());
- }
-};
-
-class FindOneRequireIndex : public Base {
-public:
- void run() {
- insert(BSON("b" << 2 << "_id" << 0));
- BSONObj query = fromjson("{b:2}");
- BSONObj ret;
-
- // Check findOne() returning object, allowing unindexed scan.
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false));
- // Check findOne() returning location, allowing unindexed scan.
- ASSERT_BSONOBJ_EQ(
- ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false))
- .value());
-
- // Check findOne() returning object, requiring indexed scan without index.
- ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, ret, true), AssertionException);
- // Check findOne() returning location, requiring indexed scan without index.
- ASSERT_THROWS(Helpers::findOne(&_opCtx, _collection, query, true), AssertionException);
-
- addIndex(IndexSpec().addKey("b").unique(false));
-
- // Check findOne() returning object, requiring indexed scan with index.
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, true));
- // Check findOne() returning location, requiring indexed scan with index.
- ASSERT_BSONOBJ_EQ(
- ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, true))
- .value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query)).value());
}
};
@@ -262,12 +229,11 @@ public:
insert(BSONObj());
BSONObj query;
BSONObj ret;
- ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret, false));
+ ASSERT(Helpers::findOne(&_opCtx, _collection, query, ret));
ASSERT(ret.isEmpty());
ASSERT_BSONOBJ_EQ(
ret,
- _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query, false))
- .value());
+ _collection->docFor(&_opCtx, Helpers::findOne(&_opCtx, _collection, query)).value());
}
};
@@ -1432,7 +1398,7 @@ public:
ASSERT_EQUALS(50, count());
BSONObj res;
- ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res));
ASSERT_EQUALS(40, res["x"].numberInt());
ASSERT(Helpers::findById(&_opCtx, ctx.db(), ns(), BSON("_id" << 20), res));
@@ -1447,8 +1413,7 @@ public:
{
Timer t;
for (int i = 0; i < n; i++) {
- ASSERT(
- Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res, true));
+ ASSERT(Helpers::findOne(&_opCtx, ctx.getCollection(), BSON("_id" << 20), res));
}
slow = t.micros();
}
@@ -1936,7 +1901,6 @@ public:
void setupTests() {
add<FindingStart>();
add<FindOneOr>();
- add<FindOneRequireIndex>();
add<FindOneEmptyObj>();
add<BoundedKey>();
add<GetMore>();