author     Mihai Andrei <mihai.andrei@mongodb.com>  2023-02-03 18:09:28 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-02-03 19:52:33 +0000
commit     56616080c12298229fc6e3cc71ace4c85ac973f4 (patch)
tree       2eec3256ce8903a95085ee65dd7e5a0894cd739b
parent     d5e1315e25a60019434fa2379a2168c29b5cd93a (diff)
download   mongo-56616080c12298229fc6e3cc71ace4c85ac973f4.tar.gz
SERVER-71798 Expand the set of queries eligible for SBE in the 6.3 release
-rw-r--r--  buildscripts/gdb/mongo_printers.py  3
-rw-r--r--  buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml  1
-rw-r--r--  jstests/aggregation/add_with_date.js  2
-rw-r--r--  jstests/aggregation/explain_limit.js  2
-rw-r--r--  jstests/aggregation/optimize_away_pipeline.js  89
-rw-r--r--  jstests/core/columnstore/column_scan_skip_row_store_projection.js  4
-rw-r--r--  jstests/core/columnstore/column_store_index_compression.js  5
-rw-r--r--  jstests/core/columnstore/columnstore_eligibility.js  33
-rw-r--r--  jstests/core/computed_projections.js  9
-rw-r--r--  jstests/core/index/hidden_index.js  3
-rw-r--r--  jstests/core/index/index_filter_commands.js  5
-rw-r--r--  jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js  2
-rw-r--r--  jstests/core/index/indexj.js  38
-rw-r--r--  jstests/core/index/wildcard/wildcard_index_cached_plans.js  2
-rw-r--r--  jstests/core/query/explode_for_sort_plan_cache.js  4
-rw-r--r--  jstests/core/query/expr/expr_index_use.js  6
-rw-r--r--  jstests/core/query/idhack.js  3
-rw-r--r--  jstests/core/query/null_query_semantics.js  4
-rw-r--r--  jstests/core/query/or/or_to_in.js  6
-rw-r--r--  jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js  5
-rw-r--r--  jstests/core/query/plan_cache/collation_plan_cache.js  3
-rw-r--r--  jstests/core/query/plan_cache/plan_cache_clear.js  4
-rw-r--r--  jstests/core/query/plan_cache/plan_cache_list_plans.js  2
-rw-r--r--  jstests/core/query/plan_cache/plan_cache_list_shapes.js  4
-rw-r--r--  jstests/core/query/plan_cache/plan_cache_sbe.js  6
-rw-r--r--  jstests/core/query/plan_cache/plan_cache_shell_helpers.js  2
-rw-r--r--  jstests/core/query/project/projection_expr_mod.js  9
-rw-r--r--  jstests/core/query/project/projection_semantics.js  5
-rw-r--r--  jstests/core/query/query_hash_stability.js  2
-rw-r--r--  jstests/core/sbe/from_plan_cache_flag.js  11
-rw-r--r--  jstests/core/sbe/plan_cache_sbe_with_or_queries.js  5
-rw-r--r--  jstests/core/sbe/sbe_explain_rejected_plans.js  9
-rw-r--r--  jstests/core/sbe/sbe_ixscan_explain.js  7
-rw-r--r--  jstests/core/sbe_plan_cache_autoparameterize_collscan.js  65
-rw-r--r--  jstests/cqf/analyze/scalar_histograms.js  6
-rw-r--r--  jstests/libs/parallelTester.js  9
-rw-r--r--  jstests/libs/sbe_explain_helpers.js  18
-rw-r--r--  jstests/libs/sbe_util.js  86
-rw-r--r--  jstests/noPassthrough/columnstore_index_rowstore_settings.js  5
-rw-r--r--  jstests/noPassthrough/currentop_query.js  2
-rw-r--r--  jstests/noPassthrough/external_sort_find.js  2
-rw-r--r--  jstests/noPassthrough/log_and_profile_query_hash.js  2
-rw-r--r--  jstests/noPassthrough/lookup_metrics.js  3
-rw-r--r--  jstests/noPassthrough/lookup_pushdown.js  174
-rw-r--r--  jstests/noPassthrough/plan_cache_group_lookup.js  32
-rw-r--r--  jstests/noPassthrough/plan_cache_index_create.js  4
-rw-r--r--  jstests/noPassthrough/plan_cache_list_failed_plans.js  4
-rw-r--r--  jstests/noPassthrough/plan_cache_memory_debug_info.js  4
-rw-r--r--  jstests/noPassthrough/plan_cache_replan_group_lookup.js  40
-rw-r--r--  jstests/noPassthrough/plan_cache_replan_sort.js  2
-rw-r--r--  jstests/noPassthrough/plan_cache_stats_agg_source.js  6
-rw-r--r--  jstests/noPassthrough/query_engine_stats.js  4
-rw-r--r--  jstests/noPassthrough/restart_index_build_if_resume_fails.js  6
-rw-r--r--  jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js  3
-rw-r--r--  jstests/noPassthrough/resumable_index_build_bulk_load_phase.js  5
-rw-r--r--  jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js  5
-rw-r--r--  jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js  3
-rw-r--r--  jstests/noPassthrough/resumable_index_build_collection_scan_phase.js  5
-rw-r--r--  jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js  3
-rw-r--r--  jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js  4
-rw-r--r--  jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js  4
-rw-r--r--  jstests/noPassthrough/resumable_index_build_initialized.js  5
-rw-r--r--  jstests/noPassthrough/resumable_index_build_mixed_phases.js  5
-rw-r--r--  jstests/noPassthrough/sbe_multiplanner_trial_termination.js  4
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js  7
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_key_reporting.js  4
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js  4
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_size_metric.js  4
-rw-r--r--  jstests/noPassthrough/server_status_multiplanner.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js  5
-rw-r--r--  jstests/noPassthroughWithMongod/group_pushdown.js  56
-rw-r--r--  jstests/noPassthroughWithMongod/index_bounds_static_limit.js  4
-rw-r--r--  jstests/noPassthroughWithMongod/ne_array_indexability.js  2
-rw-r--r--  jstests/noPassthroughWithMongod/plan_cache_replanning.js  2
-rw-r--r--  jstests/noPassthroughWithMongod/sbe_query_eligibility.js  192
-rw-r--r--  jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js  7
-rw-r--r--  jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js  12
-rw-r--r--  src/mongo/db/commands/external_data_source_commands_test.cpp  8
-rw-r--r--  src/mongo/db/commands/index_filter_commands.cpp  14
-rw-r--r--  src/mongo/db/commands/plan_cache_clear_command.cpp  19
-rw-r--r--  src/mongo/db/exec/plan_cache_util.cpp  3
-rw-r--r--  src/mongo/db/exec/plan_cache_util.h  54
-rw-r--r--  src/mongo/db/exec/sbe/expressions/expression.cpp  16
-rw-r--r--  src/mongo/db/exec/sbe/values/value.cpp  3
-rw-r--r--  src/mongo/db/exec/sbe/values/value.h  13
-rw-r--r--  src/mongo/db/exec/sbe/values/value_printer.cpp  6
-rw-r--r--  src/mongo/db/exec/sbe/vm/vm.cpp  42
-rw-r--r--  src/mongo/db/exec/sbe/vm/vm.h  6
-rw-r--r--  src/mongo/db/exec/sbe/vm/vm_printer.cpp  36
-rw-r--r--  src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp  31
-rw-r--r--  src/mongo/db/query/canonical_query.cpp  9
-rw-r--r--  src/mongo/db/query/canonical_query_encoder.cpp  4
-rw-r--r--  src/mongo/db/query/canonical_query_encoder_test.cpp  5
-rw-r--r--  src/mongo/db/query/canonical_query_test.cpp  2
-rw-r--r--  src/mongo/db/query/classic_plan_cache.cpp  3
-rw-r--r--  src/mongo/db/query/explain.cpp  1
-rw-r--r--  src/mongo/db/query/get_executor.cpp  167
-rw-r--r--  src/mongo/db/query/projection.h  7
-rw-r--r--  src/mongo/db/query/query_feature_flags.idl  3
-rw-r--r--  src/mongo/db/query/query_utils.cpp  22
-rw-r--r--  src/mongo/db/query/query_utils.h  6
-rw-r--r--  src/mongo/db/query/sbe_cached_solution_planner.cpp  60
-rw-r--r--  src/mongo/db/query/sbe_plan_cache.cpp  116
-rw-r--r--  src/mongo/db/query/sbe_stage_builder.cpp  14
-rw-r--r--  src/mongo/db/query/sbe_stage_builder_filter.cpp  43
-rw-r--r--  src/mongo/db/s/collection_sharding_runtime.cpp  86
106 files changed, 852 insertions, 1065 deletions
diff --git a/buildscripts/gdb/mongo_printers.py b/buildscripts/gdb/mongo_printers.py
index 1c2929870f7..5bd9d5330e5 100644
--- a/buildscripts/gdb/mongo_printers.py
+++ b/buildscripts/gdb/mongo_printers.py
@@ -849,9 +849,6 @@ class SbeCodeFragmentPrinter(object):
'Instruction::Constants: ' + str(const_enum) + \
", offset: " + str(read_as_integer_signed(cur_op, int_size))
cur_op += int_size
- elif op_name in ['applyClassicMatcher']:
- args = 'MatchExpression* ' + hex(read_as_integer(cur_op, ptr_size))
- cur_op += ptr_size
elif op_name in ['dateTruncImm']:
unit = read_as_integer(cur_op, time_unit_size)
cur_op += time_unit_size
diff --git a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml
index 205cb537143..bf664ae7043 100644
--- a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml
+++ b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml
@@ -15,7 +15,6 @@ selector:
- uses_parallel_shell
# columnstore indexes are under development and cannot be used without enabling the feature flag
- featureFlagColumnstoreIndexes
- - featureFlagSbeFull
# Server side javascript not allowed in Serverless.
- requires_scripting
exclude_files:
diff --git a/jstests/aggregation/add_with_date.js b/jstests/aggregation/add_with_date.js
index b80c304e42c..4d76a6908d7 100644
--- a/jstests/aggregation/add_with_date.js
+++ b/jstests/aggregation/add_with_date.js
@@ -28,8 +28,6 @@ assert.commandWorked(coll.insert({
nanDecimal: NumberDecimal("NaN"),
}));
-const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
-
// Adding a Decimal128 value to a date literal.
assert.eq(ISODate("2019-01-30T07:30:10.957Z"),
getResultOfExpression({$add: ["$decimalVal", ISODate("2019-01-30T07:30:10.137Z")]}));
diff --git a/jstests/aggregation/explain_limit.js b/jstests/aggregation/explain_limit.js
index 7ce18401062..5e017f9b74e 100644
--- a/jstests/aggregation/explain_limit.js
+++ b/jstests/aggregation/explain_limit.js
@@ -18,7 +18,7 @@ let coll = db.explain_limit;
const kCollSize = 105;
const kLimit = 10;
-const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const isSBEEnabled = checkSBEEnabled(db);
// Return whether or not explain() was successful and contained the appropriate fields given the
// requested verbosity. Checks that the number of documents examined and returned are correct given
diff --git a/jstests/aggregation/optimize_away_pipeline.js b/jstests/aggregation/optimize_away_pipeline.js
index 065691a0663..453ee78db40 100644
--- a/jstests/aggregation/optimize_away_pipeline.js
+++ b/jstests/aggregation/optimize_away_pipeline.js
@@ -639,33 +639,66 @@ function assertProjectionIsNotRemoved(pipeline, projectionType = "PROJECTION_SIM
assertProjectionCanBeRemovedBeforeGroup(
[{$project: {a: 1, b: 1}}, {$group: {_id: "$a", s: {$sum: "$b"}}}]);
-assertProjectionCanBeRemovedBeforeGroup(
- [{$project: {'a.b': 1, 'b.c': 1}}, {$group: {_id: "$a.b", s: {$sum: "$b.c"}}}],
- "PROJECTION_DEFAULT");
-
-// Test that an inclusion projection is NOT optimized away if it is NOT redundant. This one fails to
-// include a dependency of the $group and so will have an impact on the query results.
+// Test that an inclusion projection is NOT optimized away if it is NOT redundant. This one
+// fails to include a dependency of the $group and so will have an impact on the query results.
assertProjectionIsNotRemoved([{$project: {a: 1}}, {$group: {_id: "$a", s: {$sum: "$b"}}}]);
-// Test similar cases with dotted paths.
-assertProjectionIsNotRemoved([{$project: {'a.b': 1}}, {$group: {_id: "$a.b", s: {$sum: "$b"}}}],
- "PROJECTION_DEFAULT");
-assertProjectionIsNotRemoved([{$project: {'a.b': 1}}, {$group: {_id: "$a.b", s: {$sum: "$a.c"}}}],
- "PROJECTION_DEFAULT");
// TODO SERVER-67323 This one could be removed, but is left for future work.
assertProjectionIsNotRemoved(
[{$project: {a: 1, b: 1}}, {$group: {_id: "$a.b", s: {$sum: "$b.c"}}}]);
-// Spinoff on the one above: Without supporting this kind of prefixing analysis, we can confuse
-// ourselves with our dependency analysis. If the $group depends on both "path" and "path.subpath"
-// then it will generate a $project on only "path" to express its dependency set. We then fail to
-// optimize that out.
+// If the $group depends on both "path" and "path.subpath" then it will generate a $project on only
+// "path" to express its dependency set. We then fail to optimize that out. As a future improvement,
+// we could improve the optimizer to ensure that a projection stage is not present in the resulting
+// plan.
pipeline = [{$group: {_id: "$a.b", s: {$first: "$a"}}}];
// TODO SERVER-XYZ Assert this can be optimized out.
// assertProjectionCanBeRemovedBeforeGroup(pipeline, "PROJECTION_DEFAULT");
// assertProjectionCanBeRemovedBeforeGroup(pipeline, "PROJECTION_SIMPLE");
assertProjectionIsNotRemoved(pipeline);
+// Though $group is generally eligible for pushdown into SBE, such a pushdown may be inhibited by
+// dotted as well as computed projections. As such we only run the test cases below if SBE is fully
+// enabled.
+const sbeFull = checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */);
+if (sbeFull) {
+ assertProjectionCanBeRemovedBeforeGroup(
+ [{$project: {'a.b': 1, 'b.c': 1}}, {$group: {_id: "$a.b", s: {$sum: "$b.c"}}}],
+ "PROJECTION_DEFAULT");
+
+ // Test that a computed projection at the front of the pipeline is pushed down, even if there's
+ // no finite dependency set.
+ pipeline = [{$project: {x: {$add: ["$a", 1]}}}];
+ assertPipelineDoesNotUseAggregation(
+ {pipeline: pipeline, expectedStages: ["COLLSCAN", "PROJECTION_DEFAULT"]});
+
+ // The projections below are not removed because they fail to include the $group's dependencies.
+ assertProjectionIsNotRemoved([{$project: {'a.b': 1}}, {$group: {_id: "$a.b", s: {$sum: "$b"}}}],
+ "PROJECTION_DEFAULT");
+ assertProjectionIsNotRemoved(
+ [{$project: {'a.b': 1}}, {$group: {_id: "$a.b", s: {$sum: "$a.c"}}}], "PROJECTION_DEFAULT");
+
+ pipeline = [{$project: {a: {$add: ["$a", 1]}}}, {$group: {_id: "$a", s: {$sum: "$b"}}}];
+ assertPipelineIfGroupPushdown(
+ // Test that a computed projection at the front of the pipeline is pushed down when there's
+ // a finite dependency set. Additionally, the group pushdown shouldn't erase the computed
+ // projection.
+ function() {
+ explain = coll.explain().aggregate(pipeline);
+ assertPipelineDoesNotUseAggregation(
+ {pipeline: pipeline, expectedStages: ["COLLSCAN", "PROJECTION_DEFAULT", "GROUP"]});
+ },
+ // Test that a computed projection at the front of the pipeline is pushed down when there's
+ // a finite dependency set.
+ function() {
+ explain = coll.explain().aggregate(pipeline);
+ assertPipelineUsesAggregation({
+ pipeline: pipeline,
+ expectedStages: ["COLLSCAN", "PROJECTION_DEFAULT", "$group"],
+ });
+ });
+}
+
// We generate a projection stage from dependency analysis, even if the pipeline begins with an
// exclusion projection.
pipeline = [{$project: {c: 0}}, {$group: {_id: "$a", b: {$sum: "$b"}}}];
@@ -696,32 +729,6 @@ pipeline = [{$project: {x: 0}}];
assertPipelineDoesNotUseAggregation(
{pipeline: pipeline, expectedStages: ["PROJECTION_SIMPLE", "COLLSCAN"]});
-// Test that a computed projection at the front of the pipeline is pushed down, even if there's no
-// finite dependency set.
-pipeline = [{$project: {x: {$add: ["$a", 1]}}}];
-assertPipelineDoesNotUseAggregation(
- {pipeline: pipeline, expectedStages: ["COLLSCAN", "PROJECTION_DEFAULT"]});
-
-pipeline = [{$project: {a: {$add: ["$a", 1]}}}, {$group: {_id: "$a", s: {$sum: "$b"}}}];
-assertPipelineIfGroupPushdown(
- // Test that a computed projection at the front of the pipeline is pushed down when there's a
- // finite dependency set. Additionally, the group pushdown shouldn't erase the computed
- // projection.
- function() {
- explain = coll.explain().aggregate(pipeline);
- assertPipelineDoesNotUseAggregation(
- {pipeline: pipeline, expectedStages: ["COLLSCAN", "PROJECTION_DEFAULT", "GROUP"]});
- },
- // Test that a computed projection at the front of the pipeline is pushed down when there's a
- // finite dependency set.
- function() {
- explain = coll.explain().aggregate(pipeline);
- assertPipelineUsesAggregation({
- pipeline: pipeline,
- expectedStages: ["COLLSCAN", "PROJECTION_DEFAULT", "$group"],
- });
- });
-
// getMore cases.
// Test getMore on a collection with an optimized away pipeline.
diff --git a/jstests/core/columnstore/column_scan_skip_row_store_projection.js b/jstests/core/columnstore/column_scan_skip_row_store_projection.js
index b69e515d7ef..8124af3e446 100644
--- a/jstests/core/columnstore/column_scan_skip_row_store_projection.js
+++ b/jstests/core/columnstore/column_scan_skip_row_store_projection.js
@@ -30,8 +30,8 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/clustered_collections/clustered_collection_util.js");
load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
-const columnstoreEnabled = checkSBEEnabled(
- db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true /* checkAllNodes */);
+const columnstoreEnabled =
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index test since the feature flag is not enabled.");
return;
diff --git a/jstests/core/columnstore/column_store_index_compression.js b/jstests/core/columnstore/column_store_index_compression.js
index c66c12abef0..52b74a23a10 100644
--- a/jstests/core/columnstore/column_store_index_compression.js
+++ b/jstests/core/columnstore/column_store_index_compression.js
@@ -8,7 +8,6 @@
* # Column store indexes are still under a feature flag and require full SBE.
* uses_column_store_index,
* featureFlagColumnstoreIndexes,
- * featureFlagSbeFull,
*
* # In passthrough suites, this test makes direct connections to mongod instances that compose
* # the passthrough fixture in order to perform additional validation. Tenant migration,
@@ -28,8 +27,8 @@ load("jstests/libs/fixture_helpers.js"); // For isMongos
load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const columnstoreEnabled = checkSBEEnabled(
- db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true /* checkAllNodes */);
+const columnstoreEnabled =
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index test since the feature flag is not enabled.");
return;
diff --git a/jstests/core/columnstore/columnstore_eligibility.js b/jstests/core/columnstore/columnstore_eligibility.js
index c65f4c50eff..791d9929608 100644
--- a/jstests/core/columnstore/columnstore_eligibility.js
+++ b/jstests/core/columnstore/columnstore_eligibility.js
@@ -25,8 +25,6 @@ if (!setUpServerForColumnStoreIndexTest(db)) {
return;
}
-const sbeFull = checkSBEEnabled(db, ["featureFlagSbeFull"]);
-
const coll = db.columnstore_eligibility;
coll.drop();
@@ -66,12 +64,35 @@ assert(planHasStage(db, explain, "COLUMN_SCAN"), explain);
// will be applied after assembling an intermediate result containing both "a" and "b".
explain = coll.find({$or: [{a: 2}, {b: 2}]}, {_id: 0, a: 1}).explain();
-// For top-level $or queries, COLUMN_SCAN is only used when sbeFull is also enabled due to a
-// quirk in the engine selection logic. TODO: SERVER-XYZ.
+// COLUMN_SCAN is used for top-level $or queries.
+assert(planHasStage(db, explain, "COLUMN_SCAN"), explain);
+
+// COLUMN_SCAN is only used for certain top-level $or queries when sbeFull is also enabled due
+// to a quirk in the engine selection logic.
+const sbeFull = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+explain = coll.explain().aggregate([
+ {$match: {$or: [{a: {$gt: 0}}, {b: {$gt: 0}}]}},
+ {$project: {_id: 0, computedField: {$add: ["$a", "$b"]}}},
+]);
+let planHasColumnScan = planHasStage(db, explain, "COLUMN_SCAN");
+
if (sbeFull) {
- assert(planHasStage(db, explain, "COLUMN_SCAN"), explain);
+ assert(planHasColumnScan, explain);
+} else {
+ assert(!planHasColumnScan, explain);
+}
+
+explain = coll.explain().aggregate([
+ {$match: {$or: [{a: {$gt: 0}}, {b: {$gt: 0}}]}},
+ {$project: {_id: 0, computedField: {$add: ["$a", "$b"]}}},
+ {$group: {_id: "$computedField"}}
+]);
+planHasColumnScan = planHasStage(db, explain, "COLUMN_SCAN");
+
+if (sbeFull) {
+ assert(planHasColumnScan, explain);
} else {
- assert(planHasStage(db, explain, "COLLSCAN"), explain);
+ assert(!planHasColumnScan, explain);
}
// Simplest case: just scan "a" column.
diff --git a/jstests/core/computed_projections.js b/jstests/core/computed_projections.js
index 06ea2476bd9..fd432771f44 100644
--- a/jstests/core/computed_projections.js
+++ b/jstests/core/computed_projections.js
@@ -2,14 +2,7 @@
"use strict";
load("jstests/aggregation/extras/utils.js"); // For arrayEq and orderedArrayEq.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabledOnSomeNode.
-
-const isSBEEnabled = checkSBEEnabledOnSomeNode(db);
-if (isSBEEnabled) {
- // Override error-code-checking APIs. We only load this when SBE is explicitly enabled, because
- // it causes failures in the parallel suites.
- load("jstests/libs/sbe_assert_error_override.js");
-}
+load("jstests/libs/sbe_assert_error_override.js");
// It is safe for other tests to run while this failpoint is active, so long as those tests do not
// use documents containing a field with "POISON" as their name. Note that this command can fail.
diff --git a/jstests/core/index/hidden_index.js b/jstests/core/index/hidden_index.js
index e1edef07fe7..572d9016ae0 100644
--- a/jstests/core/index/hidden_index.js
+++ b/jstests/core/index/hidden_index.js
@@ -25,8 +25,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
const columnstoreEnabled =
- checkSBEEnabled(
- db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true /* checkAllNodes */) &&
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */) &&
setUpServerForColumnStoreIndexTest(db);
const collName = "hidden_index";
diff --git a/jstests/core/index/index_filter_commands.js b/jstests/core/index/index_filter_commands.js
index 1272cb2fd19..429a79adea8 100644
--- a/jstests/core/index/index_filter_commands.js
+++ b/jstests/core/index/index_filter_commands.js
@@ -33,7 +33,8 @@
* assumes_read_preference_unchanged,
* assumes_unsharded_collection,
* does_not_support_stepdowns,
- * requires_fcv_60
+ * # The SBE plan cache was first enabled in 6.3.
+ * requires_fcv_63,
* ]
*/
@@ -344,7 +345,7 @@ assert.commandFailed(
filters = getFilters();
assert.eq(0, filters.length, tojson(filters));
-if (checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */)) {
+if (checkSBEEnabled(db)) {
//
// Test that planCacheSetFilter doesn't apply to the inner side of a $lookup.
//
diff --git a/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js b/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js
index 059241284ac..bc2dc74c0e1 100644
--- a/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js
+++ b/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js
@@ -27,7 +27,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
// For testing convenience this variable is made an integer "1" if SBE is fully enabled, because the
// expected amount of plan cache entries differs between the SBE plan cache and the classic one.
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]) ? 1 : 0;
+const isSbeEnabled = checkSBEEnabled(db) ? 1 : 0;
const collName = "index_filter_commands_invalidate_plan_cache_entries";
const coll = db[collName];
diff --git a/jstests/core/index/indexj.js b/jstests/core/index/indexj.js
index 3023b661e59..93034fec923 100644
--- a/jstests/core/index/indexj.js
+++ b/jstests/core/index/indexj.js
@@ -7,12 +7,15 @@
// # Different assertions are made depending on whether SBE or classic is used. Implicitly
// # creating an index can change which engine is used.
// assumes_no_implicit_index_creation,
+// # This test assumes that either SBE or classic is fully enabled and that we're not running in
+// # a mixed version cluster.
+// requires_fcv_63,
// ]
(function() {
"use strict";
-load("jstests/libs/sbe_explain_helpers.js"); // For engineSpecificAssertion.
+load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
const t = db[jsTestName()];
t.drop();
@@ -56,42 +59,39 @@ assert.commandWorked(t.insert({a: 2, b: 2}));
// SBE or the classic engine. This is because the classic engine will use a multi-interval index
// scan whereas SBE will decompose the intervals into a set of single-interval bounds and will end
// up examining 0 keys.
-let assertFn = function(expectedKeys, numKeysExamined) {
- return numKeysExamined === expectedKeys;
-};
+const isSBEEnabled = checkSBEEnabled(db);
+let expectedKeys = isSBEEnabled ? 0 : 3;
let errMsg = function(actualNumKeys) {
return "Chosen plan examined " + actualNumKeys + " keys";
};
-let sbeAssert = actualKeys => assertFn(0, actualKeys);
-let classicAssert = actualKeys => assertFn(3, actualKeys);
let numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}, {a: -1, b: -1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
assert.commandWorked(t.insert({a: 1, b: 1}));
assert.commandWorked(t.insert({a: 1, b: 1}));
numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}, {a: -1, b: -1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
// We examine one less key in the classic engine because the bounds are slightly tighter.
-classicAssert = actualKeys => assertFn(2, actualKeys);
-numKeys = keysExamined({a: {$in: [1, 1.9]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+if (!isSBEEnabled) {
+ expectedKeys = 2;
+}
+numKeys = keysExamined({a: {$in: [1, 1.9]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
numKeys = keysExamined({a: {$in: [1.1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}, {a: -1, b: -1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
-
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
assert.commandWorked(t.insert({a: 1, b: 1.5}));
// We examine one extra key in both engines because we've inserted a document that falls within
// both sets of bounds being scanned.
-sbeAssert = actualKeys => assertFn(1, actualKeys);
-classicAssert = actualKeys => assertFn(4, actualKeys);
+expectedKeys = isSBEEnabled ? 1 : 4;
numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1});
-engineSpecificAssertion(classicAssert(numKeys), sbeAssert(numKeys), db, errMsg(numKeys));
+assert.eq(numKeys, expectedKeys, errMsg(numKeys));
})();
diff --git a/jstests/core/index/wildcard/wildcard_index_cached_plans.js b/jstests/core/index/wildcard/wildcard_index_cached_plans.js
index f3a41f5361e..5e78a11d451 100644
--- a/jstests/core/index/wildcard/wildcard_index_cached_plans.js
+++ b/jstests/core/index/wildcard/wildcard_index_cached_plans.js
@@ -34,7 +34,7 @@ coll.drop();
assert.commandWorked(coll.createIndex({"b.$**": 1}));
assert.commandWorked(coll.createIndex({"a": 1}));
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const isSbeEnabled = checkSBEEnabled(db);
// In order for the plan cache to be used, there must be more than one plan available. Insert
// data into the collection such that the b.$** index will be far more selective than the index
diff --git a/jstests/core/query/explode_for_sort_plan_cache.js b/jstests/core/query/explode_for_sort_plan_cache.js
index 3d489ad9d54..2da78284a6a 100644
--- a/jstests/core/query/explode_for_sort_plan_cache.js
+++ b/jstests/core/query/explode_for_sort_plan_cache.js
@@ -11,7 +11,7 @@
* # If all chunks are moved off of a shard, it can cause the plan cache to miss commands.
* assumes_balancer_off,
* assumes_unsharded_collection,
- * requires_fcv_62,
+ * requires_fcv_63,
* # Plan cache state is node-local and will not get migrated alongside tenant data.
* tenant_migration_incompatible,
* # Part of this test does different checks depending on the engine used. If an implicit index
@@ -65,7 +65,7 @@ const sortSpec = {
// TODO SERVER-67576: remove this branch once explode for sort plans are supported by the SBE plan
// cache.
-if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+if (checkSBEEnabled(db)) {
// Run the query for the first time and make sure the plan hasn't been cached.
assert.eq(0, coll.find(querySpec).sort(sortSpec).itcount());
assertCacheEntryDoesNotExist(querySpec, sortSpec);
diff --git a/jstests/core/query/expr/expr_index_use.js b/jstests/core/query/expr/expr_index_use.js
index a0f85bc69c9..2eadaf645be 100644
--- a/jstests/core/query/expr/expr_index_use.js
+++ b/jstests/core/query/expr/expr_index_use.js
@@ -1,6 +1,7 @@
// Confirms expected index use when performing a match with a $expr statement.
// @tags: [
// assumes_read_concern_local,
+// requires_fcv_63,
// ]
(function() {
@@ -92,10 +93,7 @@ function confirmExpectedExprExecution(expr, metricsToCheck, collation) {
];
assert.eq(metricsToCheck.nReturned, coll.aggregate(pipelineWithProject, aggOptions).itcount());
let explain = coll.explain("executionStats").aggregate(pipelineWithProject, aggOptions);
- assert(getAggPlanStage(explain, "COLLSCAN", isSBEEnabled /* useQueryPlannerSection */) ||
- checkBothEnginesAreRunOnCluster(db) &&
- (getAggPlanStage(explain, "COLLSCAN", false /* useQueryPlannerSection */) ||
- getAggPlanStage(explain, "COLLSCAN", true /* useQueryPlannerSection */)),
+ assert(getAggPlanStage(explain, "COLLSCAN", isSBEEnabled /* useQueryPlannerSection */),
explain);
// Verifies that there are no rejected plans, and that the winning plan uses the expected
diff --git a/jstests/core/query/idhack.js b/jstests/core/query/idhack.js
index e60bd9c8a50..1ddab70d4cd 100644
--- a/jstests/core/query/idhack.js
+++ b/jstests/core/query/idhack.js
@@ -61,8 +61,7 @@ winningPlan = getWinningPlan(explain.queryPlanner);
assert(!isIdhack(db, winningPlan), winningPlan);
// Covered query returning _id field only can be handled by ID hack.
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
-const parentStage = isSbeEnabled ? "PROJECTION_COVERED" : "FETCH";
+const parentStage = checkSBEEnabled(db) ? "PROJECTION_COVERED" : "FETCH";
explain = t.find(query, {_id: 1}).explain();
winningPlan = getWinningPlan(explain.queryPlanner);
assert(isIdhack(db, winningPlan), winningPlan);
diff --git a/jstests/core/query/null_query_semantics.js b/jstests/core/query/null_query_semantics.js
index a99741853d2..71cd4b7c84f 100644
--- a/jstests/core/query/null_query_semantics.js
+++ b/jstests/core/query/null_query_semantics.js
@@ -790,8 +790,8 @@ const keyPatterns = [
];
// Include Columnstore Index only if FF is enabled and collection is not clustered.
-const columnstoreEnabled = checkSBEEnabled(
- db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true /* checkAllNodes */);
+const columnstoreEnabled =
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */);
if (columnstoreEnabled && setUpServerForColumnStoreIndexTest(db)) {
keyPatterns.push({keyPattern: {"$**": "columnstore"}});
}
diff --git a/jstests/core/query/or/or_to_in.js b/jstests/core/query/or/or_to_in.js
index 5d3c745dc95..681e2af9e2a 100644
--- a/jstests/core/query/or/or_to_in.js
+++ b/jstests/core/query/or/or_to_in.js
@@ -4,6 +4,7 @@
// This test is not prepared to handle explain output for sharded collections.
// @tags: [
// assumes_unsharded_collection,
+// requires_fcv_63,
// ]
(function() {
@@ -11,7 +12,6 @@
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkBothEnginesAreRunOnCluster.
var coll = db.orToIn;
coll.drop();
@@ -48,9 +48,7 @@ function assertEquivPlanAndResult(expectedQuery, actualQuery, supportWithCollati
// Make sure both queries have the same access plan.
const expectedPlan = getWinningPlan(expectedExplain.queryPlanner);
const actualPlan = getWinningPlan(actualExplain.queryPlanner);
- if (!checkBothEnginesAreRunOnCluster(db)) {
- assert.docEq(expectedPlan, actualPlan);
- }
+ assert.docEq(expectedPlan, actualPlan);
// The queries must produce the same result.
const expectedRes = coll.find(expectedQuery).toArray();
diff --git a/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js b/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
index f83b9e6a817..941ec0106b5 100644
--- a/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
+++ b/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
@@ -12,7 +12,8 @@
// assumes_read_preference_unchanged,
// assumes_unsharded_collection,
// does_not_support_stepdowns,
-// requires_fcv_52,
+// # The SBE plan cache was first enabled in 6.3.
+// requires_fcv_63,
// requires_profiling,
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
@@ -25,7 +26,7 @@
load("jstests/libs/profiler.js"); // getLatestProfileEntry.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+if (!checkSBEEnabled(db)) {
jsTestLog("Skipping test because SBE is disabled");
return;
}
diff --git a/jstests/core/query/plan_cache/collation_plan_cache.js b/jstests/core/query/plan_cache/collation_plan_cache.js
index da6c2f08081..99e983dd2fa 100644
--- a/jstests/core/query/plan_cache/collation_plan_cache.js
+++ b/jstests/core/query/plan_cache/collation_plan_cache.js
@@ -20,7 +20,6 @@
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromExplain.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
var coll = db.collation_plan_cache;
coll.drop();
@@ -54,6 +53,8 @@ assert.commandWorked(
// The query shape should have been added.
var shapes = coll.aggregate([{$planCacheStats: {}}]).toArray();
assert.eq(1, shapes.length, 'unexpected cache size after running query');
+
+const isSbeEnabled = checkSBEEnabled(db);
if (!isSbeEnabled) {
assert.eq(shapes[0].createdFromQuery.query, {a: 'foo', b: 5}, shapes);
assert.eq(shapes[0].createdFromQuery.sort, {}, shapes);
diff --git a/jstests/core/query/plan_cache/plan_cache_clear.js b/jstests/core/query/plan_cache/plan_cache_clear.js
index 7aee144bc89..d03330ab08e 100644
--- a/jstests/core/query/plan_cache/plan_cache_clear.js
+++ b/jstests/core/query/plan_cache/plan_cache_clear.js
@@ -15,6 +15,8 @@
// assumes_unsharded_collection,
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
+// # The SBE plan cache was first enabled in 6.3.
+// requires_fcv_63,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
// cqf_incompatible,
// ]
@@ -104,7 +106,7 @@ const nonExistentColl = db.plan_cache_clear_nonexistent;
nonExistentColl.drop();
assert.commandWorked(nonExistentColl.runCommand('planCacheClear'));
-if (checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */)) {
+if (checkSBEEnabled(db)) {
// Plan cache commands should work against the main collection only, not foreignColl
// collections, when $lookup is pushed down into SBE.
const foreignColl = db.plan_cache_clear_foreign;
diff --git a/jstests/core/query/plan_cache/plan_cache_list_plans.js b/jstests/core/query/plan_cache/plan_cache_list_plans.js
index 6521aac3448..b0f7a24c615 100644
--- a/jstests/core/query/plan_cache/plan_cache_list_plans.js
+++ b/jstests/core/query/plan_cache/plan_cache_list_plans.js
@@ -29,7 +29,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let coll = db.jstests_plan_cache_list_plans;
coll.drop();
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const isSbeEnabled = checkSBEEnabled(db);
function dumpPlanCacheState() {
return coll.aggregate([{$planCacheStats: {}}]).toArray();
diff --git a/jstests/core/query/plan_cache/plan_cache_list_shapes.js b/jstests/core/query/plan_cache/plan_cache_list_shapes.js
index 7d348cc21a2..48535eae0c3 100644
--- a/jstests/core/query/plan_cache/plan_cache_list_shapes.js
+++ b/jstests/core/query/plan_cache/plan_cache_list_shapes.js
@@ -20,8 +20,8 @@
'use strict';
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is fully enabled.");
+if (checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is enabled.");
return;
}
diff --git a/jstests/core/query/plan_cache/plan_cache_sbe.js b/jstests/core/query/plan_cache/plan_cache_sbe.js
index aeb0609246c..2a35b786d70 100644
--- a/jstests/core/query/plan_cache/plan_cache_sbe.js
+++ b/jstests/core/query/plan_cache/plan_cache_sbe.js
@@ -12,8 +12,8 @@
* assumes_read_concern_unchanged,
* assumes_read_preference_unchanged,
* assumes_unsharded_collection,
- * # The SBE plan cache was introduced in 6.0.
- * requires_fcv_60,
+ * # The SBE plan cache was first enabled in 6.3.
+ * requires_fcv_63,
* # Plan cache state is node-local and will not get migrated alongside tenant data.
* tenant_migration_incompatible,
* # TODO SERVER-67607: Test plan cache with CQF enabled.
@@ -30,7 +30,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const coll = db.plan_cache_sbe;
coll.drop();
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */);
+const isSbeEnabled = checkSBEEnabled(db);
assert.commandWorked(coll.insert({a: 1, b: 1}));
diff --git a/jstests/core/query/plan_cache/plan_cache_shell_helpers.js b/jstests/core/query/plan_cache/plan_cache_shell_helpers.js
index b663e09e90b..a102d8a001d 100644
--- a/jstests/core/query/plan_cache/plan_cache_shell_helpers.js
+++ b/jstests/core/query/plan_cache/plan_cache_shell_helpers.js
@@ -21,7 +21,7 @@ load('jstests/aggregation/extras/utils.js'); // For assertArrayEq.
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const isSbeEnabled = checkSBEEnabled(db);
var coll = db.jstests_plan_cache_shell_helpers;
coll.drop();
diff --git a/jstests/core/query/project/projection_expr_mod.js b/jstests/core/query/project/projection_expr_mod.js
index aa882d190b5..a8f2905bce5 100644
--- a/jstests/core/query/project/projection_expr_mod.js
+++ b/jstests/core/query/project/projection_expr_mod.js
@@ -4,14 +4,7 @@
"use strict";
load("jstests/aggregation/extras/utils.js"); // For assertArrayEq.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-
-const isSBEEnabled = checkSBEEnabledOnSomeNode(db);
-if (isSBEEnabled) {
- // Override error-code-checking APIs. We only load this when SBE is explicitly enabled, because
- // it causes failures in the parallel suites.
- load("jstests/libs/sbe_assert_error_override.js");
-}
+load("jstests/libs/sbe_assert_error_override.js");
const coll = db.projection_expr_mod;
coll.drop();
diff --git a/jstests/core/query/project/projection_semantics.js b/jstests/core/query/project/projection_semantics.js
index cf71f951755..1f811c84b36 100644
--- a/jstests/core/query/project/projection_semantics.js
+++ b/jstests/core/query/project/projection_semantics.js
@@ -6,7 +6,8 @@
* # We could potentially need to resume an index build in the event of a stepdown, which is not
* # yet implemented.
* does_not_support_stepdowns,
- * requires_fcv_62,
+ * # Columnstore indexes were first enabled by default on 6.3.
+ * requires_fcv_63,
* # Columnstore tests set server parameters to disable columnstore query planning heuristics -
* # server parameters are stored in-memory only so are not transferred onto the recipient.
* tenant_migration_incompatible,
@@ -19,7 +20,7 @@
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest.
-if (!checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"])) {
+if (!checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"])) {
jsTestLog("Skipping test since columnstore Indexes are not enabled");
return;
}
diff --git a/jstests/core/query/query_hash_stability.js b/jstests/core/query/query_hash_stability.js
index aa0a3399e70..c358f1c7d34 100644
--- a/jstests/core/query/query_hash_stability.js
+++ b/jstests/core/query/query_hash_stability.js
@@ -112,7 +112,7 @@ assertPlanCacheField({
// SBE's planCacheKey encoding encodes "collection version" which will be increased after dropping
// an index.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+if (!checkSBEEnabled(db)) {
// The 'planCacheKey' should be the same as what it was before we dropped the index.
assertPlanCacheField({
firstExplain: initialExplain,
diff --git a/jstests/core/sbe/from_plan_cache_flag.js b/jstests/core/sbe/from_plan_cache_flag.js
index ef910b42513..15fee88f73c 100644
--- a/jstests/core/sbe/from_plan_cache_flag.js
+++ b/jstests/core/sbe/from_plan_cache_flag.js
@@ -1,8 +1,10 @@
-// The test runs commands that are not allowed with security token: setProfilingLevel.
// @tags: [
+// # The test runs commands that are not allowed with security token: setProfilingLevel.
// not_allowed_with_security_token,
// requires_profiling,
// does_not_support_stepdowns,
+// # The SBE plan cache was first enabled in 6.3.
+// requires_fcv_63,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
// cqf_incompatible,
// ]
@@ -14,7 +16,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/profiler.js"); // For getLatestProfilerEntry.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */)) {
+if (!checkSBEEnabled(db)) {
jsTest.log("Skip running the test because SBE is not enabled");
return;
}
@@ -33,11 +35,6 @@ assert.commandWorked(coll.insert({a: 2}));
let pipeline = {$match: {a: 1}};
coll.aggregate([pipeline]).toArray();
let profileObj = getLatestProfilerEntry(testDB);
-/* fromPlanCache can be undefined in the profiler entry. The first ! determines the
- * profileObj.fromPlanCache value's associated true/false value (important in the case where
- * undefined) and then returns the opposite of the associated true/false value. The second !
- * returns the opposite of the opposite value. In other words, the !! returns the boolean true/false
- * association of a value. */
assert.eq(!!profileObj.fromPlanCache, false);
coll.aggregate({$match: {a: 2}}).toArray();
diff --git a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
index a4c474c82bd..f4a90d42dc1 100644
--- a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
+++ b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
@@ -7,7 +7,8 @@
// assumes_read_concern_unchanged,
// assumes_unsharded_collection,
// does_not_support_stepdowns,
-// requires_fcv_60,
+// # The SBE plan cache was first enabled in 6.3.
+// requires_fcv_63,
// # Plan cache state is node-local and will not get migrated alongside tenant data.
// tenant_migration_incompatible,
// # TODO SERVER-67607: Test plan cache with CQF enabled.
@@ -20,7 +21,7 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */)) {
+if (!checkSBEEnabled(db)) {
jsTest.log("Skip running the test because SBE is not enabled");
return;
}
diff --git a/jstests/core/sbe/sbe_explain_rejected_plans.js b/jstests/core/sbe/sbe_explain_rejected_plans.js
index 0e858cfd48c..a6ae2575186 100644
--- a/jstests/core/sbe/sbe_explain_rejected_plans.js
+++ b/jstests/core/sbe/sbe_explain_rejected_plans.js
@@ -1,6 +1,9 @@
/**
* Tests that SBE reports correct rejected plans when calling explain().
- * @tags: [assumes_unsharded_collection, requires_fcv_62]
+ * @tags: [
+ * assumes_unsharded_collection,
+ * requires_fcv_63,
+ * ]
*/
(function() {
"use strict";
@@ -9,9 +12,9 @@ load("jstests/libs/analyze_plan.js");
load("jstests/libs/collection_drop_recreate.js");
load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
-const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const isSBEEnabled = checkSBEEnabled(db);
if (!isSBEEnabled) {
- jsTestLog("Skipping test because the SBE feature flag is disabled");
+ jsTestLog("Skipping test because SBE is disabled");
return;
}
diff --git a/jstests/core/sbe/sbe_ixscan_explain.js b/jstests/core/sbe/sbe_ixscan_explain.js
index fee902af2b8..d79b8a209fc 100644
--- a/jstests/core/sbe/sbe_ixscan_explain.js
+++ b/jstests/core/sbe/sbe_ixscan_explain.js
@@ -3,7 +3,8 @@
//
// @tags: [
// assumes_against_mongod_not_mongos,
-// requires_fcv_51,
+// # The SBE plan cache was first enabled in 6.3.
+// requires_fcv_63,
// ]
(function() {
@@ -12,9 +13,9 @@
load('jstests/libs/analyze_plan.js'); // For getPlanStages
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */);
+const isSBEEnabled = checkSBEEnabled(db);
if (!isSBEEnabled) {
- jsTestLog("Skipping test because the SBE feature flag is disabled");
+ jsTestLog("Skipping test because SBE is disabled");
return;
}
diff --git a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
index f5be457ce99..3294846e3bd 100644
--- a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
+++ b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
@@ -7,8 +7,8 @@
* assumes_read_preference_unchanged,
* assumes_unsharded_collection,
* does_not_support_stepdowns,
- * # The SBE plan cache was introduced in 6.0.
- * requires_fcv_60,
+ * # The SBE plan cache was enabled by default in 6.3.
+ * requires_fcv_63,
* # Plan cache state is node-local and will not get migrated alongside tenant data.
* tenant_migration_incompatible,
* # TODO SERVER-67607: Test plan cache with CQF enabled.
@@ -24,9 +24,9 @@ load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js");
// This test is specifically verifying the behavior of the SBE plan cache, which is only enabled
-// when 'featureFlagSbeFull' is on.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"], true /* checkAllNodes */)) {
- jsTestLog("Skipping test because SBE is not fully enabled");
+// when SBE is enabled.
+if (!checkSBEEnabled(db)) {
+ jsTestLog("Skipping test because SBE is not enabled");
return;
}
@@ -305,32 +305,35 @@ runTest({query: {a: {$exists: true}}, projection: {_id: 1}},
false);
// Test that comparisons expressed as $expr are not auto-parameterized.
-runTest({query: {$expr: {$eq: ["$a", 3]}}, projection: {_id: 1}},
- [{_id: 2}],
- {query: {$expr: {$eq: ["$a", 4]}}, projection: {_id: 1}},
- [{_id: 3}, {_id: 4}],
- false);
-runTest({query: {$expr: {$lt: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 0}, {_id: 1}],
- {query: {$expr: {$lt: ["$a", 4]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 0}, {_id: 1}, {_id: 2}],
- false);
-runTest({query: {$expr: {$lte: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 0}, {_id: 1}, {_id: 2}],
- {query: {$expr: {$lte: ["$a", 4]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
- false);
-runTest({query: {$expr: {$gt: ["$a", 2]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
- {query: {$expr: {$gt: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
- false);
-runTest({query: {$expr: {$gte: ["$a", 2]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
- {query: {$expr: {$gte: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
- [{_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
- false);
-
+if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+ runTest({query: {$expr: {$eq: ["$a", 3]}}, projection: {_id: 1}},
+ [{_id: 2}],
+ {query: {$expr: {$eq: ["$a", 4]}}, projection: {_id: 1}},
+ [{_id: 3}, {_id: 4}],
+ false);
+ runTest({query: {$expr: {$lt: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 0}, {_id: 1}],
+ {query: {$expr: {$lt: ["$a", 4]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 0}, {_id: 1}, {_id: 2}],
+ false);
+ runTest({query: {$expr: {$lte: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 0}, {_id: 1}, {_id: 2}],
+ {query: {$expr: {$lte: ["$a", 4]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 0}, {_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}],
+ false);
+ runTest({query: {$expr: {$gt: ["$a", 2]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
+ {query: {$expr: {$gt: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
+ false);
+ runTest({query: {$expr: {$gte: ["$a", 2]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
+ {query: {$expr: {$gte: ["$a", 3]}, a: {$type: "number"}}, projection: {_id: 1}},
+ [{_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}],
+ false);
+} else {
+ jsTestLog("Skipping $expr test cases because SBE is not fully enabled");
+}
// Test that the entire list of $in values is treated as a parameter.
runTest({query: {a: {$in: [1, 2]}}, projection: {_id: 1}},
[{_id: 0}, {_id: 1}],
diff --git a/jstests/cqf/analyze/scalar_histograms.js b/jstests/cqf/analyze/scalar_histograms.js
index 7777880a1f8..3ca38f88f89 100644
--- a/jstests/cqf/analyze/scalar_histograms.js
+++ b/jstests/cqf/analyze/scalar_histograms.js
@@ -2,18 +2,12 @@
"use strict";
load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
if (!checkCascadesOptimizerEnabled(db)) {
jsTestLog("Skipping test because the optimizer is not enabled");
return;
}
-if (checkSBEEnabled(db, ["featureFlagSbeFull"], true)) {
- jsTestLog("Skipping the test because it doesn't work in Full SBE");
- return;
-}
-
assert.commandWorked(
db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
diff --git a/jstests/libs/parallelTester.js b/jstests/libs/parallelTester.js
index bff2dc5fb43..20ba30354fc 100644
--- a/jstests/libs/parallelTester.js
+++ b/jstests/libs/parallelTester.js
@@ -253,6 +253,15 @@ if (typeof _threadInject != "undefined") {
// inMemory storage engine.
"timeseries/timeseries_compact.js",
+ // TODO (SERVER-63228): Remove this exclusion once the feature flag is enabled by
+ // default.
+ "timeseries/timeseries_index_ttl_partial.js",
+
+ // These tests load 'sbe_assert_error_override.js' unconditionally, which causes
+ // failures in the parallel suite.
+ "computed_projections.js",
+ "query/project/projection_expr_mod.js",
+
// TODO (SERVER-66393): Remove this exclusion once the feature flag is enabled by
// default.
"timeseries/timeseries_update_multi.js",
diff --git a/jstests/libs/sbe_explain_helpers.js b/jstests/libs/sbe_explain_helpers.js
index 994394c9abb..a405efc1ae5 100644
--- a/jstests/libs/sbe_explain_helpers.js
+++ b/jstests/libs/sbe_explain_helpers.js
@@ -41,24 +41,6 @@ function getSbePlanStages(queryLayerOutput, stage) {
}
/**
- * Helper to make an assertion depending on the engine being used. If we're in a mixed version
- * cluster, then we assert that either 'classicAssert' or 'sbeAssert' is true because the outcome
- * will depend on which node we're making assertions against. If we're not in a mixed version
- * scenario, then we make an assertion depending on the return value of 'checkSBEEnabled'.
- */
-function engineSpecificAssertion(classicAssert, sbeAssert, theDB, msg) {
- if (checkBothEnginesAreRunOnCluster(theDB)) {
- assert(classicAssert || sbeAssert, msg);
- } else if (checkSBEEnabled(theDB, ["featureFlagSbeFull"])) {
- // This function assumes that SBE is fully enabled, and will fall back to the classic
- // assert if it is not.
- assert(sbeAssert, msg);
- } else {
- assert(classicAssert, msg);
- }
-}
-
-/**
* Gets the query info object at either the top level or the first stage from a v2
* explainOutput. If a query is a find query or some prefix stage(s) of a pipeline is pushed down to
* SBE, then plan information will be in the 'queryPlanner' object. Currently, this supports find
diff --git a/jstests/libs/sbe_util.js b/jstests/libs/sbe_util.js
index 1508052cae0..90451a646f7 100644
--- a/jstests/libs/sbe_util.js
+++ b/jstests/libs/sbe_util.js
@@ -98,89 +98,3 @@ function checkSBEEnabled(theDB, featureFlags = [], checkAllNodes = false) {
return checkResult;
}
-
-/**
- * If 'theDB' corresponds to a node in a cluster, then returns true if the cluster that it
- * belongs to has at least one node that has SBE enabled and at least one node that has it
- * disabled; false otherwise.
- */
-function checkBothEnginesAreRunOnCluster(theDB) {
- let result = false;
- assert.soon(() => {
- if (!FixtureHelpers.isMongos(theDB) && !FixtureHelpers.isReplSet(theDB)) {
- return true;
- }
-
- // Retry the check if we fail to discover the topology (this can happen if the test
- // suite has killed the primary).
- let nodes;
- try {
- nodes = DiscoverTopology.findNonConfigNodes(theDB.getMongo());
- } catch (e) {
- return false;
- }
-
- let engineMap = {sbe: 0, classic: 0};
-
- for (const node of nodes) {
- // If we cannot contact a node because it was killed or is otherwise unreachable, we
- // skip it and check the other nodes in the cluster. For our purposes, this is ok
- // because test suites which step down/kill certain nodes are configured to use
- // exactly one engine, whereas the test suites which are configured use both engines
- // (namely, the multiversion suites), do not step down/kill nodes.
- try {
- const conn = new Mongo(node);
- if (FixtureHelpers.isMongos(conn.getDB("admin"))) {
- continue;
- }
-
- const getParam = conn.adminCommand({
- getParameter: 1,
- internalQueryFrameworkControl: 1,
- internalQueryForceClassicEngine: 1,
- featureFlagSbeFull: 1,
- });
-
- if (getParam.hasOwnProperty("internalQueryFrameworkControl")) {
- // We say SBE is fully enabled if the engine is on and either
- // 'featureFlagSbeFull' doesn't exist on the targeted server, or it exists and
- // is set to true.
- if (getParam.internalQueryFrameworkControl !== "forceClassicEngine" &&
- getParam.featureFlagSbeFull.value) {
- engineMap.sbe++;
- } else {
- engineMap.classic++;
- }
- } else {
- // 'internalQueryForceClassicEngine' should be set on the previous versions
- // before 'internalQueryFrameworkControl' is introduced.
- assert(getParam.hasOwnProperty("internalQueryForceClassicEngine"), getParam);
- if (!getParam.internalQueryForceClassicEngine.value &&
- getParam.featureFlagSbeFull.value) {
- engineMap.sbe++;
- } else {
- engineMap.classic++;
- }
- }
-
- result = (engineMap.sbe > 0 && engineMap.classic > 0);
- if (result) {
- return true;
- }
- } catch (e) {
- continue;
- }
- }
-
- return true;
- });
-
- return result;
-}
-
-/**
- * Returns 'true' if SBE is enabled on at least on one node for the given connection 'db'.
- */
-function checkSBEEnabledOnSomeNode(db) {
- return checkSBEEnabled(db) || checkBothEnginesAreRunOnCluster(db);
-}
diff --git a/jstests/noPassthrough/columnstore_index_rowstore_settings.js b/jstests/noPassthrough/columnstore_index_rowstore_settings.js
index 54f405cf7b7..22a2475cdc0 100644
--- a/jstests/noPassthrough/columnstore_index_rowstore_settings.js
+++ b/jstests/noPassthrough/columnstore_index_rowstore_settings.js
@@ -3,9 +3,8 @@
* reconstruct the result of a query.
*
* @tags: [
- * # column store indexes are still under a feature flag and require full sbe
- * featureFlagColumnstoreIndexes,
- * featureFlagSbeFull,
+ * # column store indexes are still under a feature flag
+ * featureFlagColumnstoreIndexes
* ]
*/
diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js
index 7b6be89d2d8..a3244f66343 100644
--- a/jstests/noPassthrough/currentop_query.js
+++ b/jstests/noPassthrough/currentop_query.js
@@ -79,7 +79,7 @@ function runTests({conn, currentOp, truncatedOps, localOps}) {
const isLocalMongosCurOp = (FixtureHelpers.isMongos(testDB) && localOps);
const isRemoteShardCurOp = (FixtureHelpers.isMongos(testDB) && !localOps);
- const sbeEnabled = checkSBEEnabled(testDB, ["featureFlagSbeFull"]);
+ const sbeEnabled = checkSBEEnabled(testDB);
// If 'truncatedOps' is true, run only the subset of tests designed to validate the
// truncation behaviour. Otherwise, run the standard set of tests which assume that
diff --git a/jstests/noPassthrough/external_sort_find.js b/jstests/noPassthrough/external_sort_find.js
index befac93c5dc..a1505f129a3 100644
--- a/jstests/noPassthrough/external_sort_find.js
+++ b/jstests/noPassthrough/external_sort_find.js
@@ -21,7 +21,7 @@ assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(o
const testDb = conn.getDB("test");
const collection = testDb.external_sort_find;
-const isSBEEnabled = checkSBEEnabled(testDb, ["featureFlagSbeFull"]);
+const isSBEEnabled = checkSBEEnabled(testDb);
// Construct a document that is just over 1 kB.
const charToRepeat = "-";
diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js
index b31cfcfe0ad..65093194711 100644
--- a/jstests/noPassthrough/log_and_profile_query_hash.js
+++ b/jstests/noPassthrough/log_and_profile_query_hash.js
@@ -110,7 +110,7 @@ const testList = [
test: function(db, comment) {
assert.eq(200, db.test.find().comment(comment).itcount());
},
- hasPlanCacheKey: checkSBEEnabled(testDB, ["featureFlagSbeFull"])
+ hasPlanCacheKey: checkSBEEnabled(testDB)
},
{
comment: "Test1 find query",
diff --git a/jstests/noPassthrough/lookup_metrics.js b/jstests/noPassthrough/lookup_metrics.js
index 44082f99ac2..9f2aec0bfdb 100644
--- a/jstests/noPassthrough/lookup_metrics.js
+++ b/jstests/noPassthrough/lookup_metrics.js
@@ -8,8 +8,7 @@
load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages' and other explain helpers.
-const conn =
- MongoRunner.runMongod({setParameter: {featureFlagSbeFull: true, allowDiskUseByDefault: true}});
+const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: true}});
assert.neq(null, conn, "mongod was unable to start up");
const db = conn.getDB(jsTestName());
diff --git a/jstests/noPassthrough/lookup_pushdown.js b/jstests/noPassthrough/lookup_pushdown.js
index 8c6496d10f1..641c73d5ff2 100644
--- a/jstests/noPassthrough/lookup_pushdown.js
+++ b/jstests/noPassthrough/lookup_pushdown.js
@@ -18,7 +18,7 @@ const JoinAlgorithm = {
};
// Standalone cases.
-const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: true}});
+const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
const name = "lookup_pushdown";
const foreignCollName = "foreign_lookup_pushdown";
@@ -112,15 +112,13 @@ function runTest(coll,
}
let db = conn.getDB(name);
-if (!checkSBEEnabled(db)) {
- jsTestLog("Skipping test because either the sbe lookup pushdown feature flag is disabled or" +
- " sbe itself is disabled");
+const sbeEnabled = checkSBEEnabled(db);
+if (!sbeEnabled) {
+ jsTestLog("Skipping test because SBE is disabled");
MongoRunner.stopMongod(conn);
return;
}
-const sbeFullEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
-
let coll = db[name];
const localDocs = [{_id: 1, a: 2}];
assert.commandWorked(coll.insert(localDocs));
@@ -300,48 +298,6 @@ function setLookupPushdownDisabled(value) {
{allowDiskUse: false});
}());
-// Verify that SBE is only used when a $lookup or a $group is present.
-(function testLookupGroupIsRequiredForPushdown() {
- // Don't execute this test case if SBE is fully enabled.
- if (sbeFullEnabled) {
- jsTestLog("Skipping test case because we are supporting SBE beyond $group and $lookup" +
- " pushdown");
- return;
- }
-
- const assertEngineUsed = function(pipeline, isSBE) {
- const explain = coll.explain().aggregate(pipeline);
- assert(explain.hasOwnProperty("explainVersion"), explain);
- if (isSBE) {
- assert.eq(explain.explainVersion, "2", explain);
- } else {
- assert.eq(explain.explainVersion, "1", explain);
- }
- };
-
- const lookup = {$lookup: {from: "coll", localField: "a", foreignField: "b", as: "out"}};
- const group = {
- $group: {
- _id: "$a",
- out: {$min: "$b"},
- }
- };
- const match = {$match: {a: 1}};
-
- // $lookup and $group should each run in SBE.
- assertEngineUsed([lookup], true /* isSBE */);
- assertEngineUsed([group], true /* isSBE */);
- assertEngineUsed([lookup, group], true /* isSBE */);
-
- // $match on its own won't use SBE, nor will an empty pipeline.
- assertEngineUsed([match], false /* isSBE */);
- assertEngineUsed([], false /* isSBE */);
-
- // $match will use SBE if followed by either a $group or a $lookup.
- assertEngineUsed([match, lookup], true /* isSBE */);
- assertEngineUsed([match, group], true /* isSBE */);
-})();
-
// Build an index on the foreign collection that matches the foreignField. This should cause us
// to choose an indexed nested loop join.
(function testIndexNestedLoopJoinRegularIndex() {
@@ -707,61 +663,56 @@ function setLookupPushdownDisabled(value) {
// Test which verifies that the right side of a classic $lookup is never lowered into SBE, even if
// the queries for the right side are eligible on their own to run in SBE.
(function verifyThatClassicLookupRightSideIsNeverLoweredIntoSBE() {
- // If running with SBE fully enabled, verify that our $match is SBE compatible. Otherwise,
- // verify that the same $match, when used as a $lookup sub-pipeline, will not be lowered
- // into SBE.
+ // Confirm that our candidate subpipeline is SBE compatible on its own.
const subPipeline = [{$match: {b: 2}}];
- if (sbeFullEnabled) {
- const subPipelineExplain = foreignColl.explain().aggregate(subPipeline);
- assert(subPipelineExplain.hasOwnProperty("explainVersion"), subPipelineExplain);
- assert.eq(subPipelineExplain["explainVersion"], "2", subPipelineExplain);
- } else {
- const pipeline = [{$lookup: {from: foreignCollName, pipeline: subPipeline, as: "result"}}];
- runTest(coll, pipeline, JoinAlgorithm.Classic /* expectedJoinAlgorithm */);
-
- // Create multiple indexes that can be used to answer the subPipeline query. This will allow
- // the winning plan to be cached.
- assert.commandWorked(foreignColl.dropIndexes());
- assert.commandWorked(foreignColl.createIndexes([{b: 1, a: 1}, {b: 1, c: 1}]));
-
- // Run the pipeline enough times to generate a cache entry for the right side in the foreign
- // collection.
- coll.aggregate(pipeline).itcount();
- coll.aggregate(pipeline).itcount();
-
- const cacheEntries = foreignColl.getPlanCache().list();
- assert.eq(cacheEntries.length, 1);
- const cacheEntry = cacheEntries[0];
-
- // The cached plan should be a classic plan.
- assert(cacheEntry.hasOwnProperty("version"), cacheEntry);
- assert.eq(cacheEntry.version, "1", cacheEntry);
- assert(cacheEntry.hasOwnProperty("cachedPlan"), cacheEntry);
- const cachedPlan = cacheEntry.cachedPlan;
-
- // The cached plan should not have slot based plan. Instead, it should be a FETCH + IXSCAN
- // executed in the classic engine.
- assert(!cachedPlan.hasOwnProperty("slots"), cacheEntry);
- assert(cachedPlan.hasOwnProperty("stage"), cacheEntry);
-
- assert(planHasStage(db, cachedPlan, "FETCH"), cacheEntry);
- assert(planHasStage(db, cachedPlan, "IXSCAN"), cacheEntry);
- assert.commandWorked(coll.dropIndexes());
- }
+ const subPipelineExplain = foreignColl.explain().aggregate(subPipeline);
+ assert(subPipelineExplain.hasOwnProperty("explainVersion"), subPipelineExplain);
+ assert.eq(subPipelineExplain["explainVersion"], "2", subPipelineExplain);
+
+ // Now, run a lookup and force it to run in the classic engine by prefixing it with
+ // '$_internalInhibitOptimization'.
+ const pipeline = [
+ {$_internalInhibitOptimization: {}},
+ {$lookup: {from: foreignCollName, pipeline: subPipeline, as: "result"}}
+ ];
+ runTest(coll, pipeline, JoinAlgorithm.Classic /* expectedJoinAlgorithm */);
+
+ // Create multiple indexes that can be used to answer the subPipeline query. This will allow
+ // the winning plan to be cached.
+ assert.commandWorked(foreignColl.dropIndexes());
+ assert.commandWorked(foreignColl.createIndexes([{b: 1, a: 1}, {b: 1, c: 1}]));
+
+ // Run the pipeline enough times to generate a cache entry for the right side in the foreign
+ // collection.
+ coll.aggregate(pipeline).itcount();
+ coll.aggregate(pipeline).itcount();
+
+ const cacheEntries = foreignColl.getPlanCache().list();
+ assert.eq(cacheEntries.length, 1);
+ const cacheEntry = cacheEntries[0];
+
+ // The cached plan should be a classic plan.
+ assert(cacheEntry.hasOwnProperty("version"), cacheEntry);
+ assert.eq(cacheEntry.version, "1", cacheEntry);
+ assert(cacheEntry.hasOwnProperty("cachedPlan"), cacheEntry);
+ const cachedPlan = cacheEntry.cachedPlan;
+
+ // The cached plan should not be a slot-based plan. Instead, it should be a FETCH + IXSCAN
+ // executed in the classic engine.
+ assert(!cachedPlan.hasOwnProperty("slots"), cacheEntry);
+ assert(cachedPlan.hasOwnProperty("stage"), cacheEntry);
+
+ assert(planHasStage(db, cachedPlan, "FETCH"), cacheEntry);
+ assert(planHasStage(db, cachedPlan, "IXSCAN"), cacheEntry);
+ assert.commandWorked(coll.dropIndexes());
}());
MongoRunner.stopMongod(conn);
-// Verify that pipeline stages get pushed down according to the subset of SBE that is enabled.
-(function verifyPushdownLogicSbePartiallyEnabled() {
- const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: true}});
+// Verify that $lookup and $group stages get pushed down as expected.
+(function verifyLookupGroupStagesArePushedDown() {
+ const conn = MongoRunner.runMongod();
const db = conn.getDB(name);
- if (sbeFullEnabled) {
- jsTestLog("Skipping test case because SBE is fully enabled, but this test case assumes" +
- " that it is not fully enabled");
- MongoRunner.stopMongod(conn);
- return;
- }
const coll = db[name];
const foreignColl = db[foreignCollName];
@@ -838,7 +789,7 @@ MongoRunner.stopMongod(conn);
(function testHashJoinQueryKnobs() {
// Create a new scope and start a new mongod so that the mongod-wide global state changes do not
// affect subsequent tests if any.
- const conn = MongoRunner.runMongod({setParameter: {featureFlagSbeFull: true}});
+ const conn = MongoRunner.runMongod();
const db = conn.getDB(name);
const lcoll = db.query_knobs_local;
const fcoll = db.query_knobs_foreign;
@@ -851,8 +802,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.HJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
// The fcollStats.count means the number of documents in a collection, the fcollStats.size means
// the collection's data size, and the fcollStats.storageSize means the allocated storage size.
@@ -868,8 +818,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.HJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
// Setting the 'internalQueryDisableLookupExecutionUsingHashJoin' knob to true will disable
// HJ plans from being chosen and since the pipeline is SBE compatible it will fallback to
@@ -882,8 +831,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.NLJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
// Test that we can go back to generating HJ plans.
assert.commandWorked(db.adminCommand({
@@ -894,8 +842,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.HJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
// Setting the 'internalQueryCollectionMaxNoOfDocumentsToChooseHashJoin' to count - 1 results in
// choosing the NLJ algorithm.
@@ -907,8 +854,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.NLJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
// Reverting back 'internalQueryCollectionMaxNoOfDocumentsToChooseHashJoin' to the previous
// value. Setting the 'internalQueryCollectionMaxDataSizeBytesToChooseHashJoin' to size - 1
@@ -922,8 +868,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.NLJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
// Reverting back 'internalQueryCollectionMaxDataSizeBytesToChooseHashJoin' to the previous
// value. Setting the 'internalQueryCollectionMaxStorageSizeBytesToChooseHashJoin' to
@@ -937,8 +882,7 @@ MongoRunner.stopMongod(conn);
runTest(lcoll,
[{$lookup: {from: fcoll.getName(), localField: "a", foreignField: "a", as: "out"}}],
JoinAlgorithm.NLJ,
- null /* indexKeyPattern */,
- {allowDiskUse: true});
+ null /* indexKeyPattern */);
MongoRunner.stopMongod(conn);
}());
@@ -1010,11 +954,7 @@ MongoRunner.stopMongod(conn);
}());
// Sharded cases.
-const st = new ShardingTest({
- shards: 2,
- mongos: 1,
- other: {shardOptions: {setParameter: {featureFlagSbeFull: true, allowDiskUseByDefault: true}}}
-});
+const st = new ShardingTest({shards: 2, mongos: 1});
db = st.s.getDB(name);
// Setup. Here, 'coll' is sharded, 'foreignColl' is unsharded, 'viewName' is an unsharded view,
diff --git a/jstests/noPassthrough/plan_cache_group_lookup.js b/jstests/noPassthrough/plan_cache_group_lookup.js
index f18277689b5..dcef122bf80 100644
--- a/jstests/noPassthrough/plan_cache_group_lookup.js
+++ b/jstests/noPassthrough/plan_cache_group_lookup.js
@@ -22,8 +22,6 @@ if (!checkSBEEnabled(db)) {
return;
}
-const sbeFullEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
-
assert.commandWorked(coll.insert({a: 1}));
assert.commandWorked(coll.createIndex({a: 1, a1: 1}));
assert.commandWorked(coll.createIndex({a: 1, a2: 1}));
@@ -123,7 +121,7 @@ const groupStage = {
(function testLoweredPipelineCombination() {
setupForeignColl();
- const expectedVersion = sbeFullEnabled ? 2 : 1;
+ const expectedVersion = 2;
coll.getPlanCache().clear();
testLoweredPipeline(
@@ -147,16 +145,11 @@ const groupStage = {
setupForeignColl();
testLoweredPipeline({
pipeline: [multiPlanningQueryStage, lookupStage, {$_internalInhibitOptimization: {}}],
- version: sbeFullEnabled ? 2 : 1
+ version: 2
});
})();
(function testNonExistentForeignCollectionCache() {
- if (!sbeFullEnabled) {
- jsTestLog("Skipping testNonExistentForeignCollectionCache when SBE is not fully enabled");
- return;
- }
-
coll.getPlanCache().clear();
foreignColl.drop();
const entryWithoutForeignColl =
@@ -176,12 +169,6 @@ const groupStage = {
})();
(function testForeignCollectionDropCacheInvalidation() {
- if (!sbeFullEnabled) {
- jsTestLog(
- "Skipping testForeignCollectionDropCacheInvalidation when SBE is not fully enabled");
- return;
- }
-
coll.getPlanCache().clear();
setupForeignColl();
testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
@@ -191,11 +178,6 @@ const groupStage = {
})();
(function testForeignIndexDropCacheInvalidation() {
- if (!sbeFullEnabled) {
- jsTestLog("Skipping testForeignIndexDropCacheInvalidation when SBE is not fully enabled");
- return;
- }
-
coll.getPlanCache().clear();
setupForeignColl({b: 1} /* index */);
testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
@@ -205,11 +187,6 @@ const groupStage = {
})();
(function testForeignIndexBuildCacheInvalidation() {
- if (!sbeFullEnabled) {
- jsTestLog("Skipping testForeignIndexBuildCacheInvalidation when SBE is not fully enabled");
- return;
- }
-
coll.getPlanCache().clear();
setupForeignColl({b: 1} /* index */);
testLoweredPipeline({pipeline: [multiPlanningQueryStage, lookupStage], version: 2});
@@ -219,11 +196,6 @@ const groupStage = {
})();
(function testLookupSbeAndClassicPlanCacheKey() {
- if (!sbeFullEnabled) {
- jsTestLog("Skipping testLookupWithClassicPlanCache when SBE is not fully enabled");
- return;
- }
-
setupForeignColl({b: 1} /* index */);
// When using SBE engine, the plan cache key of $match vs. $match + $lookup should be different.
diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js
index fed5fa3c18e..7a37e49bfc4 100644
--- a/jstests/noPassthrough/plan_cache_index_create.js
+++ b/jstests/noPassthrough/plan_cache_index_create.js
@@ -180,8 +180,8 @@ rst.initiate();
const primaryDB = rst.getPrimary().getDB(dbName);
const secondaryDB = rst.getSecondary().getDB(dbName);
-if (checkSBEEnabled(primaryDB, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is fully enabled");
+if (checkSBEEnabled(primaryDB)) {
+ jsTest.log("Skipping test because SBE is enabled");
rst.stopSet();
return;
}
diff --git a/jstests/noPassthrough/plan_cache_list_failed_plans.js b/jstests/noPassthrough/plan_cache_list_failed_plans.js
index 9fd79e8308e..3e778a53e3a 100644
--- a/jstests/noPassthrough/plan_cache_list_failed_plans.js
+++ b/jstests/noPassthrough/plan_cache_list_failed_plans.js
@@ -9,8 +9,8 @@ assert.neq(null, conn, "mongod was unable to start up");
const testDB = conn.getDB("jstests_plan_cache_list_failed_plans");
const coll = testDB.test;
-if (checkSBEEnabled(testDB, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is fully enabled");
+if (checkSBEEnabled(testDB)) {
+ jsTest.log("Skipping test because SBE is enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/plan_cache_memory_debug_info.js b/jstests/noPassthrough/plan_cache_memory_debug_info.js
index 331077bebfa..a52d1f5aebe 100644
--- a/jstests/noPassthrough/plan_cache_memory_debug_info.js
+++ b/jstests/noPassthrough/plan_cache_memory_debug_info.js
@@ -83,8 +83,8 @@ assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("test");
const coll = db.plan_cache_memory_debug_info;
-if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is fully enabled");
+if (checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/plan_cache_replan_group_lookup.js b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
index 1fa8be2ab10..941b9ba7d8d 100644
--- a/jstests/noPassthrough/plan_cache_replan_group_lookup.js
+++ b/jstests/noPassthrough/plan_cache_replan_group_lookup.js
@@ -19,7 +19,7 @@ const coll = db.plan_cache_replan_group_lookup;
const foreignCollName = "foreign";
coll.drop();
-const sbeFullEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const sbeEnabled = checkSBEEnabled(db);
function getPlansForCacheEntry(match) {
const matchingCacheEntries = coll.getPlanCache().list([{$match: match}]);
@@ -165,7 +165,7 @@ const aIndexPredicate = [{$match: {a: 1042, b: 1}}];
// {a: 1} index is used.
const bIndexPredicate = [{$match: {a: 1, b: 1042}}];
-const expectedVersion = sbeFullEnabled ? 2 : 1;
+const expectedVersion = sbeEnabled ? 2 : 1;
// $group tests.
const groupSuffix = [{$group: {_id: "$c"}}, {$count: "n"}];
testFn(aIndexPredicate.concat(groupSuffix),
@@ -249,9 +249,9 @@ assert.eq(2, coll.aggregate(aLookup).toArray()[0].n);
// If SBE plan cache is enabled, a new cache entry will be created in the SBE plan cache after
// invalidation. The corresponding cache entry in SBE plan cache should be inactive because the SBE
// plan cache is invalidated on index drop.
-assertCacheUsage(sbeFullEnabled /*multiPlanning*/,
+assertCacheUsage(sbeEnabled /*multiPlanning*/,
expectedVersion /* cacheEntryVersion */,
- !sbeFullEnabled /*cacheEntryIsActive*/,
+ !sbeEnabled /*cacheEntryIsActive*/,
"a_1" /*cachedIndexName*/,
aLookup);
@@ -260,7 +260,7 @@ verifyCorrectLookupAlgorithmUsed("NestedLoopJoin", aLookup, {allowDiskUse: false
assert.eq(2, coll.aggregate(aLookup).toArray()[0].n);
// Note that multi-planning is expected here when the SBE plan cache is enabled because the
// 'allowDiskUse' value is part of the SBE plan cache key encoding.
-assertCacheUsage(sbeFullEnabled /*multiPlanning*/,
+assertCacheUsage(sbeEnabled /*multiPlanning*/,
expectedVersion /* cacheEntryVersion */,
true /*cacheEntryIsActive*/,
"a_1" /*cachedIndexName*/,
@@ -270,9 +270,9 @@ assertCacheUsage(sbeFullEnabled /*multiPlanning*/,
dropLookupForeignColl();
verifyCorrectLookupAlgorithmUsed("NonExistentForeignCollection", aLookup, {allowDiskUse: true});
assert.eq(2, coll.aggregate(aLookup).toArray()[0].n);
-assertCacheUsage(sbeFullEnabled /*multiPlanning*/,
+assertCacheUsage(sbeEnabled /*multiPlanning*/,
expectedVersion /* cacheEntryVersion */,
- !sbeFullEnabled /*cacheEntryIsActive*/,
+ !sbeEnabled /*cacheEntryIsActive*/,
"a_1" /*cachedIndexName*/,
aLookup);
@@ -335,7 +335,7 @@ verifyCorrectLookupAlgorithmUsed(
// If SBE plan cache is enabled, after dropping index, the $lookup plan cache will be invalidated.
// We will need to rerun the multi-planner.
-if (sbeFullEnabled) {
+if (sbeEnabled) {
runLookupQuery({allowDiskUse: false});
assertCacheUsage(true /*multiPlanning*/,
2 /* cacheEntryVersion */,
@@ -355,7 +355,7 @@ if (sbeFullEnabled) {
runLookupQuery({allowDiskUse: false});
assertCacheUsage(false /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
@@ -363,7 +363,7 @@ assertCacheUsage(false /*multiPlanning*/,
runLookupQuery({allowDiskUse: false});
assertCacheUsage(false /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
@@ -375,7 +375,7 @@ verifyCorrectLookupAlgorithmUsed("HashJoin", avoidReplanLookupPipeline, {allowDi
// If SBE plan cache is enabled, using different 'allowDiskUse' option will result in
// different plan cache key.
-if (sbeFullEnabled) {
+if (sbeEnabled) {
runLookupQuery({allowDiskUse: true});
assertCacheUsage(true /*multiPlanning*/,
2 /* cacheEntryVersion */,
@@ -395,14 +395,14 @@ if (sbeFullEnabled) {
runLookupQuery({allowDiskUse: true});
assertCacheUsage(false /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
{allowDiskUse: true});
runLookupQuery({allowDiskUse: true});
assertCacheUsage(false /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline,
@@ -428,25 +428,25 @@ verifyCorrectLookupAlgorithmUsed("IndexedLoopJoin", avoidReplanLookupPipeline);
// Set up an active cache entry.
runLookupQuery();
assertCacheUsage(true /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
false /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(true /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(false /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
runLookupQuery();
assertCacheUsage(false /*multiPlanning*/,
- sbeFullEnabled ? 2 : 1 /* cacheEntryVersion */,
+ sbeEnabled ? 2 : 1 /* cacheEntryVersion */,
true /*activeCacheEntry*/,
"b_1" /*cachedIndexName*/,
avoidReplanLookupPipeline);
@@ -459,7 +459,7 @@ assertCacheUsage(false /*multiPlanning*/,
* solution.
*/
function testReplanningAndCacheInvalidationOnForeignCollSizeIncrease(singleSolution) {
- if (!sbeFullEnabled) {
+ if (!sbeEnabled) {
return;
}
@@ -552,7 +552,7 @@ let explain = coll.explain().aggregate(avoidReplanLookupPipeline);
const eqLookupNodes = getAggPlanStages(explain, "EQ_LOOKUP");
assert.eq(eqLookupNodes.length, 0, "expected no EQ_LOOKUP nodes; got " + tojson(explain));
-if (sbeFullEnabled) {
+if (sbeEnabled) {
runLookupQuery();
const profileObj = getLatestProfilerEntry(db, {op: "command", ns: coll.getFullName()});
const matchingCacheEntries =
@@ -621,7 +621,7 @@ explain = coll.explain().aggregate(avoidReplanLookupPipeline);
groupNodes = getAggPlanStages(explain, "GROUP");
assert.eq(groupNodes.length, 0);
-if (sbeFullEnabled) {
+if (sbeEnabled) {
runGroupQuery();
const profileObj = getLatestProfilerEntry(db, {op: "command", ns: coll.getFullName()});
const matchingCacheEntries =
diff --git a/jstests/noPassthrough/plan_cache_replan_sort.js b/jstests/noPassthrough/plan_cache_replan_sort.js
index 711a2676a15..16c80ea346b 100644
--- a/jstests/noPassthrough/plan_cache_replan_sort.js
+++ b/jstests/noPassthrough/plan_cache_replan_sort.js
@@ -44,7 +44,7 @@ assert.eq(1, cachedPlans.length, cachedPlans);
assert.eq(true, cachedPlans[0].isActive, cachedPlans);
const cachedPlan = getCachedPlan(cachedPlans[0].cachedPlan);
const cachedPlanVersion = cachedPlans[0].version;
-if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+if (checkSBEEnabled(db)) {
// If the SBE plan cache is on, then the cached plan has a different format.
assert.eq(cachedPlanVersion, "2", cachedPlans);
assert(cachedPlan.stages.includes("sort"), cachedPlans);
diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js
index 01bbf6a75a0..9c2777e1f04 100644
--- a/jstests/noPassthrough/plan_cache_stats_agg_source.js
+++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js
@@ -16,11 +16,7 @@ assert.neq(null, conn, "mongod failed to start up");
const testDb = conn.getDB("test");
const coll = testDb.plan_cache_stats_agg_source;
-
-// Note that the "getParameter" command is expected to fail in versions of mongod that do not yet
-// include the slot-based execution engine. When that happens, however, 'isSBEEnabled' still
-// correctly evaluates to false.
-const isSBEEnabled = checkSBEEnabled(testDb, ["featureFlagSbeFull"]);
+const isSBEEnabled = checkSBEEnabled(testDb);
function makeMatchForFilteringByShape(query) {
const keyHash = getPlanCacheKeyFromShape({query: query, collection: coll, db: testDb});
diff --git a/jstests/noPassthrough/query_engine_stats.js b/jstests/noPassthrough/query_engine_stats.js
index bf0cef1f043..a505b7512a0 100644
--- a/jstests/noPassthrough/query_engine_stats.js
+++ b/jstests/noPassthrough/query_engine_stats.js
@@ -15,8 +15,8 @@ assert.neq(null, conn, "mongod was unable to start up");
let db = conn.getDB(jsTestName());
// This test assumes that SBE is being used for most queries.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTestLog("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/restart_index_build_if_resume_fails.js b/jstests/noPassthrough/restart_index_build_if_resume_fails.js
index 0336894febf..bcd1e3a50ce 100644
--- a/jstests/noPassthrough/restart_index_build_if_resume_fails.js
+++ b/jstests/noPassthrough/restart_index_build_if_resume_fails.js
@@ -24,9 +24,9 @@ rst.initiate();
let primary = rst.getPrimary();
let coll = primary.getDB(dbName).getCollection(collName);
-const columnstoreEnabled = checkSBEEnabled(primary.getDB(dbName),
- ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"],
- true /* checkAllNodes */) &&
+const columnstoreEnabled =
+ checkSBEEnabled(
+ primary.getDB(dbName), ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */) &&
setUpServerForColumnStoreIndexTest(primary.getDB(dbName));
assert.commandWorked(coll.insert({a: 1}));
diff --git a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js
index 0d9d4b1ff9b..46f5fd8d80a 100644
--- a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js
+++ b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js
@@ -25,8 +25,7 @@ rst.initiate();
let primary = rst.getPrimary();
const columnstoreEnabled =
- checkSBEEnabled(
- primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true) &&
+ checkSBEEnabled(primary.getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(primary.getDB(dbName));
ResumableIndexBuildTest.runResumeInterruptedByShutdown(
diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
index 6e24516c726..147c4e4281e 100644
--- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js
@@ -21,9 +21,8 @@ const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const columnstoreEnabled = checkSBEEnabled(rst.getPrimary().getDB(dbName),
- ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"],
- true) &&
+const columnstoreEnabled =
+ checkSBEEnabled(rst.getPrimary().getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(rst.getPrimary().getDB(dbName));
const runTests = function(docs, indexSpecsFlat, collNameSuffix) {
diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js
index 3c217cc272f..8cbdcb18268 100644
--- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js
+++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js
@@ -23,9 +23,8 @@ const rst = new ReplSetTest(
rst.startSet();
rst.initiate();
-const columnstoreEnabled = checkSBEEnabled(rst.getPrimary().getDB(dbName),
- ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"],
- true) &&
+const columnstoreEnabled =
+ checkSBEEnabled(rst.getPrimary().getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(rst.getPrimary().getDB(dbName));
// Insert enough data so that the collection scan spills to disk.
diff --git a/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js b/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js
index fb563c40cca..c94db0b2ee7 100644
--- a/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js
+++ b/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js
@@ -30,8 +30,7 @@ rst.initiate();
// Insert enough data so that the collection scan spills to disk.
const primary = rst.getPrimary();
const columnstoreEnabled =
- checkSBEEnabled(
- primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true) &&
+ checkSBEEnabled(primary.getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(primary.getDB(dbName));
const coll = primary.getDB(dbName).getCollection(jsTestName());
const bulk = coll.initializeUnorderedBulkOp();
diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
index 5186ecfe278..49ec48f5ced 100644
--- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
+++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js
@@ -22,9 +22,8 @@ const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const columnstoreEnabled = checkSBEEnabled(rst.getPrimary().getDB(dbName),
- ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"],
- true) &&
+const columnstoreEnabled =
+ checkSBEEnabled(rst.getPrimary().getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(rst.getPrimary().getDB(dbName));
const runTests = function(docs, indexSpecsFlat, collNameSuffix) {
diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js
index d1c498eb3eb..d1dd867cb3e 100644
--- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js
+++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js
@@ -33,8 +33,7 @@ const primary = rst.getPrimary();
const coll = primary.getDB(dbName).getCollection(jsTestName());
const columnstoreEnabled =
- checkSBEEnabled(
- primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true) &&
+ checkSBEEnabled(primary.getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(primary.getDB(dbName));
const bulk = coll.initializeUnorderedBulkOp();
diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js
index 8e6f529c8c4..52def8c8bde 100644
--- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js
+++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js
@@ -25,8 +25,8 @@ rst.initiate();
const primary = rst.getPrimary();
const coll = primary.getDB(dbName).getCollection(collName);
-const columnstoreEnabled = checkSBEEnabled(
- primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true);
+const columnstoreEnabled =
+ checkSBEEnabled(primary.getDB(dbName), ["featureFlagColumnstoreIndexes"], true);
assert.commandWorked(coll.insert({a: 1}));
diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js
index f70971967c1..cb859855d34 100644
--- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js
+++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js
@@ -33,8 +33,8 @@ rst.initiateWithHighElectionTimeout();
let primary = rst.getPrimary();
let coll = primary.getDB(dbName).getCollection(collName);
-const columnstoreEnabled = checkSBEEnabled(
- primary.getDB(dbName), ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"], true);
+const columnstoreEnabled =
+ checkSBEEnabled(primary.getDB(dbName), ["featureFlagColumnstoreIndexes"], true);
assert.commandWorked(coll.insert({a: 1}));
diff --git a/jstests/noPassthrough/resumable_index_build_initialized.js b/jstests/noPassthrough/resumable_index_build_initialized.js
index f89c0fed9a3..aa672b3260d 100644
--- a/jstests/noPassthrough/resumable_index_build_initialized.js
+++ b/jstests/noPassthrough/resumable_index_build_initialized.js
@@ -22,9 +22,8 @@ const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const columnstoreEnabled = checkSBEEnabled(rst.getPrimary().getDB(dbName),
- ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"],
- true) &&
+const columnstoreEnabled =
+ checkSBEEnabled(rst.getPrimary().getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(rst.getPrimary().getDB(dbName));
const runTests = function(docs, indexSpecsFlat, collNameSuffix) {
diff --git a/jstests/noPassthrough/resumable_index_build_mixed_phases.js b/jstests/noPassthrough/resumable_index_build_mixed_phases.js
index d372e51360b..463d481d5e2 100644
--- a/jstests/noPassthrough/resumable_index_build_mixed_phases.js
+++ b/jstests/noPassthrough/resumable_index_build_mixed_phases.js
@@ -21,9 +21,8 @@ const rst = new ReplSetTest({nodes: 1});
rst.startSet();
rst.initiate();
-const columnstoreEnabled = checkSBEEnabled(rst.getPrimary().getDB(dbName),
- ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"],
- true) &&
+const columnstoreEnabled =
+ checkSBEEnabled(rst.getPrimary().getDB(dbName), ["featureFlagColumnstoreIndexes"], true) &&
setUpServerForColumnStoreIndexTest(rst.getPrimary().getDB(dbName));
const runTest = function(docs, indexSpecs, failPoints, resumePhases, resumeChecks, collNameSuffix) {
diff --git a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
index 0fdd65ef851..a3178cd2858 100644
--- a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
+++ b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
@@ -25,8 +25,8 @@ assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB(dbName);
// This test assumes that SBE is being used for most queries.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTestLog("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
index 546d94f7e2f..09b94049c97 100644
--- a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
+++ b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
@@ -56,10 +56,9 @@ assert.neq(conn, null, "mongod failed to start up");
const dbName = jsTestName();
const db = conn.getDB(dbName);
-// This test is specifically verifying the behavior of the SBE plan cache which is enabled by
-// 'featureFlagSbeFull'.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTestLog("Skipping test because SBE is not fully enabled");
+// This test is specifically verifying the behavior of the SBE plan cache.
+if (!checkSBEEnabled(db)) {
+ jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/sbe_plan_cache_key_reporting.js b/jstests/noPassthrough/sbe_plan_cache_key_reporting.js
index e603c495d0e..0cf0546a6bd 100644
--- a/jstests/noPassthrough/sbe_plan_cache_key_reporting.js
+++ b/jstests/noPassthrough/sbe_plan_cache_key_reporting.js
@@ -20,8 +20,8 @@ assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("plan_cache_key_reporting");
const coll = db.coll;
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
index 7bcdba91a34..d07a4456002 100644
--- a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
+++ b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
@@ -19,8 +19,8 @@ const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("sbe_plan_cache_memory_debug_info");
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/sbe_plan_cache_size_metric.js b/jstests/noPassthrough/sbe_plan_cache_size_metric.js
index a0b820d10f2..1eb667754e0 100644
--- a/jstests/noPassthrough/sbe_plan_cache_size_metric.js
+++ b/jstests/noPassthrough/sbe_plan_cache_size_metric.js
@@ -23,8 +23,8 @@ const conn = MongoRunner.runMongod();
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("sbe_plan_cache_size_metric");
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthrough/server_status_multiplanner.js b/jstests/noPassthrough/server_status_multiplanner.js
index 580bc736daa..db4c528f031 100644
--- a/jstests/noPassthrough/server_status_multiplanner.js
+++ b/jstests/noPassthrough/server_status_multiplanner.js
@@ -24,8 +24,8 @@ assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB(dbName);
// This test assumes that SBE is being used for most queries.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTestLog("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTestLog("Skipping test because SBE is not enabled");
MongoRunner.stopMongod(conn);
return;
}
diff --git a/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js b/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js
index 8bf80294e61..386ae3d57ad 100644
--- a/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js
+++ b/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js
@@ -2,9 +2,8 @@
* Testing of the query planner heuristics for determining whether a collection is eligible for
* column scan.
* @tags: [
- * # column store indexes are still under a feature flag and require full sbe
- * featureFlagColumnstoreIndexes,
- * featureFlagSbeFull
+ * # column store indexes are still under a feature flag
+ * featureFlagColumnstoreIndexes
* ]
*/
(function() {
diff --git a/jstests/noPassthroughWithMongod/group_pushdown.js b/jstests/noPassthroughWithMongod/group_pushdown.js
index cb5743bfd1e..0a2c0716de5 100644
--- a/jstests/noPassthroughWithMongod/group_pushdown.js
+++ b/jstests/noPassthroughWithMongod/group_pushdown.js
@@ -8,7 +8,7 @@ load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
if (!checkSBEEnabled(db)) {
- jsTestLog("Skipping test because the sbe group pushdown feature flag is disabled");
+ jsTestLog("Skipping test because SBE is not enabled");
return;
}
@@ -185,28 +185,38 @@ assertResultsMatchWithAndWithoutPushdown(
],
1);
-// The $group stage refers to two existing sub-fields.
-assertResultsMatchWithAndWithoutPushdown(
- coll,
- [
- {$project: {item: 1, price: 1, quantity: 1, dateParts: {$dateToParts: {date: "$date"}}}},
- {
- $group: {
- _id: "$item",
- hs: {$sum: {$add: ["$dateParts.hour", "$dateParts.hour", "$dateParts.minute"]}}
- }
- },
- ],
- [{"_id": "a", "hs": 39}, {"_id": "b", "hs": 34}, {"_id": "c", "hs": 23}],
- 1);
-
-// The $group stage refers to a non-existing sub-field twice.
-assertResultsMatchWithAndWithoutPushdown(
- coll,
- [{$group: {_id: "$item", hs: {$sum: {$add: ["$date.hour", "$date.hour"]}}}}],
- [{"_id": "a", "hs": 0}, {"_id": "b", "hs": 0}, {"_id": "c", "hs": 0}],
- 1);
-
+// Computed projections are only eligible for pushdown into SBE when SBE is fully enabled.
+// Additionally, $group stages with dotted fields may only be eligible for pushdown when SBE is
+// fully enabled, since dependency analysis may produce a dotted projection, which is not
+// currently supported in mainline SBE.
+const sbeFull = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+if (sbeFull) {
+ // The $group stage refers to two existing sub-fields.
+ assertResultsMatchWithAndWithoutPushdown(
+ coll,
+ [
+ {
+ $project:
+ {item: 1, price: 1, quantity: 1, dateParts: {$dateToParts: {date: "$date"}}}
+ },
+ {
+ $group: {
+ _id: "$item",
+ hs: {$sum:
+ {$add: ["$dateParts.hour", "$dateParts.hour", "$dateParts.minute"]}}
+ }
+ },
+ ],
+ [{"_id": "a", "hs": 39}, {"_id": "b", "hs": 34}, {"_id": "c", "hs": 23}],
+ 1);
+
+ // The $group stage refers to a non-existing sub-field twice.
+ assertResultsMatchWithAndWithoutPushdown(
+ coll,
+ [{$group: {_id: "$item", hs: {$sum: {$add: ["$date.hour", "$date.hour"]}}}}],
+ [{"_id": "a", "hs": 0}, {"_id": "b", "hs": 0}, {"_id": "c", "hs": 0}],
+ 1);
+}
// Two group stages both get pushed down and the second $group stage refers to only existing
// top-level fields of the first $group. The field name may be one of "result" / "recordId" /
// "returnKey" / "snapshotId" / "indexId" / "indexKey" / "indexKeyPattern" which are reserved names
diff --git a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
index dc183a27346..616ddcf2a93 100644
--- a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
+++ b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
@@ -9,8 +9,8 @@
load("jstests/libs/analyze_plan.js"); // For explain helpers.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
- jsTest.log("Skipping test because SBE is not fully enabled");
+if (!checkSBEEnabled(db)) {
+ jsTest.log("Skipping test because SBE is not enabled");
return;
}
diff --git a/jstests/noPassthroughWithMongod/ne_array_indexability.js b/jstests/noPassthroughWithMongod/ne_array_indexability.js
index f2ecb27151f..e632e5fc1b6 100644
--- a/jstests/noPassthroughWithMongod/ne_array_indexability.js
+++ b/jstests/noPassthroughWithMongod/ne_array_indexability.js
@@ -31,7 +31,7 @@ function runTest(queryToCache, queryToRunAfterCaching) {
// a different planCacheKey. The SBE plan cache, on the other hand, does not auto-parameterize
// $in or $eq involving a constant of type array, and therefore will consider the two queries to
// have different shapes.
- if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+ if (checkSBEEnabled(db)) {
assert.neq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
} else {
assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
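A hypothetical shell snippet (not part of this patch) that illustrates the auto-parameterization difference described in the comment above, using the existing getPlanCacheKeyFromShape() and checkSBEEnabled() helpers:

// Sketch: the SBE plan cache does not auto-parameterize an array constant in $eq, so the two
// shapes below are expected to produce different plan cache keys; the classic cache abstracts
// the constants away and would report equal keys.
const keyA = getPlanCacheKeyFromShape({query: {a: {$eq: [1, 2]}}, collection: coll, db: db});
const keyB = getPlanCacheKeyFromShape({query: {a: {$eq: [3, 4]}}, collection: coll, db: db});
if (checkSBEEnabled(db)) {
    assert.neq(keyA, keyB);
} else {
    assert.eq(keyA, keyB);
}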
diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
index e17a81cb660..d293413f0fc 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
@@ -10,7 +10,7 @@ load('jstests/libs/analyze_plan.js'); // For getPlanStage().
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSbeEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const isSbeEnabled = checkSBEEnabled(db);
let coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
diff --git a/jstests/noPassthroughWithMongod/sbe_query_eligibility.js b/jstests/noPassthroughWithMongod/sbe_query_eligibility.js
new file mode 100644
index 00000000000..f9bc9fcb181
--- /dev/null
+++ b/jstests/noPassthroughWithMongod/sbe_query_eligibility.js
@@ -0,0 +1,192 @@
+/**
+ * Test that verifies which query shapes are eligible for SBE.
+ */
+(function() {
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
+
+/**
+ * Utility which asserts that, when aggregating over 'collection' with 'pipeline', explain
+ * reports that the expected engine (SBE or classic) was used, based on the value of 'isSBE'.
+ */
+function assertEngineUsed(collection, pipeline, isSBE) {
+ const explain = collection.explain().aggregate(pipeline);
+ const expectedExplainVersion = isSBE ? "2" : "1";
+ assert(explain.hasOwnProperty("explainVersion"), explain);
+ assert.eq(explain.explainVersion, expectedExplainVersion, explain);
+}
+
+if (!checkSBEEnabled(db)) {
+ jsTestLog("Skipping test because SBE is disabled");
+ return;
+}
+
+const collName = "sbe_eligibility";
+const coll = db[collName];
+coll.drop();
+assert.commandWorked(coll.insert({}));
+assert.eq(coll.find().itcount(), 1);
+
+// Simple eligible cases.
+const expectedSbeCases = [
+ // Non-$expr match filters.
+ [{$match: {a: 1}}],
+ [{$match: {"a.b.c": 1}}],
+
+ // Top level projections.
+ [{$project: {a: 1, b: 1}}],
+ [{$project: {a: 0, b: 0}}],
+
+ // Sorts with no common prefixes.
+ [{$sort: {a: 1}}],
+ [{$sort: {"a.b.c": 1}}],
+
+ // Test a combination of the above categories.
+ [{$match: {a: 1}}, {$sort: {b: 1}}],
+ [{$match: {a: 1}}, {$sort: {b: 1, c: 1}}],
+ [{$match: {a: 1}}, {$project: {b: 1, c: 1}}, {$sort: {b: 1, c: 1}}],
+ [
+ {$match: {a: 1, b: 1, "c.d.e": {$mod: [2, 0]}}},
+ {$project: {b: 1, c: 1}},
+ {$sort: {b: 1, c: 1}}
+ ],
+
+ // $lookup and $group should work as expected.
+ [
+ {$match: {a: 1}},
+ {$project: {a: 1}},
+ {$lookup: {from: collName, localField: "a", foreignField: "a", as: "out"}}
+ ],
+ [{$match: {a: 1}}, {$project: {a: 1}}, {$group: {_id: "$a", out: {$sum: 1}}}],
+
+ // If we have a non-SBE compatible expression after the pushdown boundary, this should not
+ // inhibit the pushdown of the pipeline prefix into SBE.
+ [
+ {$match: {a: 1}},
+ {$project: {a: 1}},
+ {$lookup: {from: collName, localField: "a", foreignField: "a", as: "out"}},
+ {$addFields: {foo: {$sum: "$a"}}},
+ {$match: {$expr: {$eq: ["$a", "$b"]}}}
+ ],
+ [
+ {$match: {a: 1}},
+ {$project: {a: 1}},
+ {$group: {_id: "$a", out: {$sum: 1}}},
+ {$match: {$expr: {$eq: ["$a", "$b"]}}}
+ ],
+];
+
+for (const pipeline of expectedSbeCases) {
+ assertEngineUsed(coll, pipeline, true /* isSBE */);
+}
+
+// The cases below are expected to use SBE only if 'featureFlagSbeFull' is on.
+const isSbeFullyEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
+const sbeFullCases = [
+ // Match filters with $expr.
+ [{$match: {$expr: {$eq: ["$a", "$b"]}}}],
+ [{$match: {$and: [{$expr: {$eq: ["$a", "$b"]}}, {c: 1}]}}],
+
+ // Dotted projections.
+ [{$project: {"a.b": 1}}],
+ [{$project: {"a.b": 0}}],
+ [{$project: {"a.b": 1, "a.c": 1}}],
+ [{$project: {"a.b.c.d.e.f.g": 0, "h.i.j.k": 0}}],
+
+ // Computed projections.
+ [{$project: {a: {$add: ["$foo", "$bar"]}}}],
+ [{$project: {a: {$divide: ["$foo", "$bar"]}}}],
+
+ // Sorts with common prefixes.
+ [{$sort: {"a.b": 1, "a.c": 1}}],
+ [{$sort: {"a.b.f.g": 1, "a.d.e.f": 1}}],
+ [{$sort: {"a": 1, "b": 1, "c.d": 1, "c.f": 1}}],
+
+ // Mix SBE-eligible and non-SBE eligible filters, projections and sorts.
+
+ // Match filters with $expr should inhibit pushdown.
+ [{$project: {a: 1, b: 1}}, {$match: {$expr: {$eq: ["$a", "$b"]}}}],
+ [{$match: {$and: [{$expr: {$eq: ["$a", "$b"]}}, {c: 1}]}}, {$sort: {a: 1, d: 1}}],
+ [
+ {$match: {$and: [{$expr: {$eq: ["$a", "$b"]}}, {c: 1}]}},
+ {
+ $lookup:
+ {from: collName, localField: "c_custkey", foreignField: "o_custkey", as: "custsale"}
+ }
+ ],
+
+ // Dotted projections should inhibit pushdown.
+ [{$match: {d: 1}}, {$project: {"a.b": 1}}],
+ [{$sort: {d: 1, e: 1}}, {$project: {"a.b": 0}}],
+ [{$match: {$or: [{a: {$gt: 0}}, {b: {$gt: 0}}]}}, {$project: {"d.a": 1}}],
+
+ [
+ {$project: {"a.b": 1, "a.d": 1}},
+ {$lookup: {from: collName, localField: "a", foreignField: "a", as: "out"}},
+ ],
+
+ // Computed projections should inhibit pushdown.
+ [{$match: {foo: {$gt: 0}}}, {$project: {a: {$add: ["$foo", "$bar"]}}}],
+ [{$project: {a: {$add: ["$foo", "$bar"]}}}, {$sort: {a: 1}}],
+ [
+ {$project: {a: {$add: ["$foo", "$bar"]}}},
+ {$group: {_id: "$a", "ct": {$sum: 1}}},
+ ],
+ [
+ {$project: {a: {$add: ["$out", 1]}}},
+ {$lookup: {from: collName, localField: "a", foreignField: "a", as: "out"}},
+ ],
+
+ // Sorts with common prefixes should inhibit pushdown.
+ [{$match: {foo: {$gt: 0}}}, {$sort: {"a.b": 1, "a.c": 1}}],
+ [{$sort: {"a.b.f.g": 1, "a.d.e.f": 1}}, {$project: {a: 1}}],
+ [{$match: {$or: [{a: {$gt: 0}}, {b: {$gt: 0}}]}}, {$sort: {"d.a": 1, "d.b": 1}}],
+ [
+ {$sort: {"a.b": 1, "a.c": 1}},
+ {$group: {_id: {a: "$foo", b: "$bar"}, "a": {$sum: 1}}},
+ ],
+ [
+ {$sort: {"b.c": 1, "b.d": 1}},
+ {$lookup: {from: collName, localField: "a", foreignField: "a", as: "out"}},
+ ],
+
+ // TPC-H query whose $lookup is SBE compatible, but which features a $match that uses $expr.
+ // Note that $match will be pushed to the front of the pipeline.
+ [
+ {
+ $lookup:
+ {from: collName, localField: "c_custkey", foreignField: "o_custkey", as: "custsale"}
+ },
+ {$addFields: {cntrycode: {$substr: ["$c_phone", 0, 2]}, custsale: {$size: "$custsale"}}},
+ {
+ $match: {
+ $and: [
+ {
+ $expr: {
+ $in: [
+ "$cntrycode",
+ [
+ {$toString: "13"},
+ {$toString: "31"},
+ {$toString: "23"},
+ {$toString: "29"},
+ {$toString: "30"},
+ {$toString: "18"},
+ {$toString: "17"}
+ ]
+ ]
+ }
+ },
+ {$expr: {$gt: ["$c_acctbal", 0.00]}}
+ ]
+ }
+ }
+ ],
+];
+
+for (const pipeline of sbeFullCases) {
+ assertEngineUsed(coll, pipeline, isSbeFullyEnabled /* isSBE */);
+}
+})();
diff --git a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
index 6aafb4c44ae..ffc349fa76b 100644
--- a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
+++ b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
@@ -1,6 +1,11 @@
/**
* Tests that plan cache entries are deleted after shard key refining, resharding and renaming
* operations.
+ *
+ * @tags: [
+ * # The SBE plan cache was enabled by default in 6.3.
+ * requires_fcv_63,
+ * ]
*/
// Cannot run the filtering metadata check on tests that run refineCollectionShardKey.
@@ -28,7 +33,7 @@ const db = st.getDB(dbName);
const collA = db["collA"];
const collB = db["collB"];
-if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+if (!checkSBEEnabled(db)) {
jsTestLog("********** Skip the test because SBE is disabled **********");
st.stop();
return;
diff --git a/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js b/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js
index e48b23ccf44..b117f7b3e69 100644
--- a/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js
+++ b/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js
@@ -24,7 +24,7 @@ const st = new ShardingTest({mongos: 1, config: 1, shards: 2});
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
-const isSbeFullyEnabled = checkSBEEnabled(st.s.getDB(dbName), ["featureFlagSbeFull"]);
+const isSBEEnabled = checkSBEEnabled(st.s.getDB(dbName));
const coll = st.s.getDB(dbName)[collName];
@@ -67,9 +67,9 @@ function runTest({indexes, document, filter}) {
});
}
-// Scenario with just one available indexed plan. If SBE is fully enabled, then the SBE plan cache
-// is in use and we expect a pinned plan cache entry.
-if (isSbeFullyEnabled) {
+// Scenario with just one available indexed plan. If SBE is enabled, then the SBE plan cache is in
+// use and we expect a pinned plan cache entry.
+if (isSBEEnabled) {
runTest({indexes: [{a: 1}], document: {_id: 0, a: "abc"}, filter: {a: "abc"}});
}
@@ -83,8 +83,8 @@ runTest({
// Test a rooted $or query. This should use the subplanner. The way that the subplanner interacts
// with the plan cache differs between the classic engine and SBE. In the classic engine, the plan
// for each branch is cached independently, whereas in SBE we cache the entire "composite" plan.
-// This test is written to expect the SBE behavior, so it only runs when SBE is fully enabled.
-if (isSbeFullyEnabled) {
+// This test is written to expect the SBE behavior, so it only runs when SBE is enabled.
+if (isSBEEnabled) {
runTest({
indexes: [{a: 1}, {b: 1}, {c: 1}, {d: 1}],
document: {_id: 0, a: "abc", b: "123", c: 4, d: 5},
diff --git a/src/mongo/db/commands/external_data_source_commands_test.cpp b/src/mongo/db/commands/external_data_source_commands_test.cpp
index 217773fa023..76ff967c904 100644
--- a/src/mongo/db/commands/external_data_source_commands_test.cpp
+++ b/src/mongo/db/commands/external_data_source_commands_test.cpp
@@ -482,10 +482,16 @@ TEST_F(ExternalDataSourceCommandsTest, ScanOverRandomInvalidDataAggRequest) {
pw.wait();
DBDirectClient client(_opCtx);
+
+ // Normally, it would be useful to test an aggregation which features a match filter. However,
+ // when reading invalid BSON, we can potentially crash because the underlying query execution
+ // plan assumes that it is reading valid BSON. This test case can be rewritten to use a match
+ // filter once the underlying cursor can guarantee that any BSONObj handed to the query
+ // execution plan is valid.
auto aggCmdObj = fromjson(R"(
{
aggregate: "coll",
- pipeline: [{$match: {a: {$lt: 5}}}],
+ pipeline: [],
cursor: {},
$_externalDataSources: [{
collName: "coll",
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 423014c0c04..79e9b8a18da 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -203,12 +203,9 @@ Status ClearFilters::runIndexFilterCommand(OperationContext* opCtx,
invariant(querySettings);
PlanCache* planCacheClassic = CollectionQueryInfo::get(collection).getPlanCache();
- sbe::PlanCache* planCacheSBE = nullptr;
invariant(planCacheClassic);
-
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- planCacheSBE = &sbe::getPlanCache(opCtx);
- }
+ sbe::PlanCache* planCacheSBE = &sbe::getPlanCache(opCtx);
+ invariant(planCacheSBE);
return clear(opCtx, collection, cmdObj, querySettings, planCacheClassic, planCacheSBE);
}
@@ -327,12 +324,9 @@ Status SetFilter::runIndexFilterCommand(OperationContext* opCtx,
invariant(querySettings);
PlanCache* planCacheClassic = CollectionQueryInfo::get(collection).getPlanCache();
- sbe::PlanCache* planCacheSBE = nullptr;
invariant(planCacheClassic);
-
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- planCacheSBE = &sbe::getPlanCache(opCtx);
- }
+ sbe::PlanCache* planCacheSBE = &sbe::getPlanCache(opCtx);
+ invariant(planCacheSBE);
return set(opCtx, collection, cmdObj, querySettings, planCacheClassic, planCacheSBE);
}
diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp
index 88b3c9ef07d..9e640a5863e 100644
--- a/src/mongo/db/commands/plan_cache_clear_command.cpp
+++ b/src/mongo/db/commands/plan_cache_clear_command.cpp
@@ -90,11 +90,8 @@ Status clear(OperationContext* opCtx,
canonical_query_encoder::encodeForPlanCacheCommand(*cq))};
plan_cache_commands::removePlanCacheEntriesByPlanCacheCommandKeys(planCacheCommandKeys,
planCache);
-
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- plan_cache_commands::removePlanCacheEntriesByPlanCacheCommandKeys(
- planCacheCommandKeys, collection->uuid(), &sbe::getPlanCache(opCtx));
- }
+ plan_cache_commands::removePlanCacheEntriesByPlanCacheCommandKeys(
+ planCacheCommandKeys, collection->uuid(), &sbe::getPlanCache(opCtx));
return Status::OK();
}
@@ -109,13 +106,11 @@ Status clear(OperationContext* opCtx,
planCache->clear();
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto version = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion();
- sbe::clearPlanCacheEntriesWith(opCtx->getServiceContext(),
- collection->uuid(),
- version,
- false /*matchSecondaryCollections*/);
- }
+ auto version = CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion();
+ sbe::clearPlanCacheEntriesWith(opCtx->getServiceContext(),
+ collection->uuid(),
+ version,
+ false /*matchSecondaryCollections*/);
LOGV2_DEBUG(
23908, 1, "{namespace}: Cleared plan cache", "Cleared plan cache", "namespace"_attr = ns);
diff --git a/src/mongo/db/exec/plan_cache_util.cpp b/src/mongo/db/exec/plan_cache_util.cpp
index 265eb1e70e7..f223afc79f9 100644
--- a/src/mongo/db/exec/plan_cache_util.cpp
+++ b/src/mongo/db/exec/plan_cache_util.cpp
@@ -76,8 +76,7 @@ void updatePlanCache(OperationContext* opCtx,
const stage_builder::PlanStageData& data) {
// TODO SERVER-67576: re-enable caching of "explode for sort" plans in the SBE cache.
if (shouldCacheQuery(query) && collections.getMainCollection() &&
- !solution.hasExplodedForSort &&
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
+ !solution.hasExplodedForSort) {
auto key = plan_cache_key_factory::make(query, collections);
auto plan = std::make_unique<sbe::CachedSbePlan>(root.clone(), data);
plan->indexFilterApplied = solution.indexFilterApplied;
diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h
index b233338882f..05b8e3d6a36 100644
--- a/src/mongo/db/exec/plan_cache_util.h
+++ b/src/mongo/db/exec/plan_cache_util.h
@@ -200,35 +200,31 @@ void updatePlanCache(
if (winningPlan.solution->cacheData != nullptr) {
if constexpr (std::is_same_v<PlanStageType, std::unique_ptr<sbe::PlanStage>>) {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- tassert(6142201,
- "The winning CandidatePlan should contain the original plan",
- winningPlan.clonedPlan);
- // Clone the winning SBE plan and its auxiliary data.
- auto cachedPlan = std::make_unique<sbe::CachedSbePlan>(
- std::move(winningPlan.clonedPlan->first),
- std::move(winningPlan.clonedPlan->second));
- cachedPlan->indexFilterApplied = winningPlan.solution->indexFilterApplied;
-
- auto buildDebugInfoFn = [soln = winningPlan.solution.get()]()
- -> plan_cache_debug_info::DebugInfoSBE { return buildDebugInfo(soln); };
- PlanCacheCallbacksImpl<sbe::PlanCacheKey,
- sbe::CachedSbePlan,
- plan_cache_debug_info::DebugInfoSBE>
- callbacks{query, buildDebugInfoFn};
- uassertStatusOK(sbe::getPlanCache(opCtx).set(
- plan_cache_key_factory::make(query, collections),
- std::move(cachedPlan),
- *rankingDecision,
- opCtx->getServiceContext()->getPreciseClockSource()->now(),
- &callbacks,
- boost::none /* worksGrowthCoefficient */));
- } else {
- // Fall back to use the classic plan cache.
- //
- // TODO SERVER-64882: Remove this branch after "gFeatureFlagSbeFull" is removed.
- cacheClassicPlan();
- }
+ tassert(6142201,
+ "The winning CandidatePlan should contain the original plan",
+ winningPlan.clonedPlan);
+
+ // Clone the winning SBE plan and its auxiliary data.
+ auto cachedPlan =
+ std::make_unique<sbe::CachedSbePlan>(std::move(winningPlan.clonedPlan->first),
+ std::move(winningPlan.clonedPlan->second));
+ cachedPlan->indexFilterApplied = winningPlan.solution->indexFilterApplied;
+
+ auto buildDebugInfoFn =
+ [soln = winningPlan.solution.get()]() -> plan_cache_debug_info::DebugInfoSBE {
+ return buildDebugInfo(soln);
+ };
+ PlanCacheCallbacksImpl<sbe::PlanCacheKey,
+ sbe::CachedSbePlan,
+ plan_cache_debug_info::DebugInfoSBE>
+ callbacks{query, buildDebugInfoFn};
+ uassertStatusOK(sbe::getPlanCache(opCtx).set(
+ plan_cache_key_factory::make(query, collections),
+ std::move(cachedPlan),
+ *rankingDecision,
+ opCtx->getServiceContext()->getPreciseClockSource()->now(),
+ &callbacks,
+ boost::none /* worksGrowthCoefficient */));
} else {
static_assert(std::is_same_v<PlanStageType, PlanStage*>);
cacheClassicPlan();
diff --git a/src/mongo/db/exec/sbe/expressions/expression.cpp b/src/mongo/db/exec/sbe/expressions/expression.cpp
index a96a559cd2d..26266070fa2 100644
--- a/src/mongo/db/exec/sbe/expressions/expression.cpp
+++ b/src/mongo/db/exec/sbe/expressions/expression.cpp
@@ -933,21 +933,6 @@ vm::CodeFragment generateTraverseCellTypes(CompileCtx& ctx,
return generatorLegacy<&vm::CodeFragment::appendTraverseCellTypes>(ctx, nodes, false);
}
-vm::CodeFragment generateClassicMatcher(CompileCtx& ctx, const EExpression::Vector& nodes, bool) {
- tassert(6681400,
- "First argument to applyClassicMatcher must be constant",
- nodes[0]->as<EConstant>());
- auto [matcherTag, matcherVal] = nodes[0]->as<EConstant>()->getConstant();
- tassert(6681409,
- "First argument to applyClassicMatcher must be a classic matcher",
- matcherTag == value::TypeTags::classicMatchExpresion);
-
- vm::CodeFragment code;
- code.append(nodes[1]->compileDirect(ctx));
- code.appendApplyClassicMatcher(value::getClassicMatchExpressionView(matcherVal));
- return code;
-}
-
/**
* The map of functions that resolve directly to instructions.
*/
@@ -986,7 +971,6 @@ static stdx::unordered_map<std::string, InstrFn> kInstrFunctions = {
{"isMinKey", InstrFn{1, generator<1, &vm::CodeFragment::appendIsMinKey>, false}},
{"isMaxKey", InstrFn{1, generator<1, &vm::CodeFragment::appendIsMaxKey>, false}},
{"isTimestamp", InstrFn{1, generator<1, &vm::CodeFragment::appendIsTimestamp>, false}},
- {"applyClassicMatcher", InstrFn{2, generateClassicMatcher, false}},
};
} // namespace
diff --git a/src/mongo/db/exec/sbe/values/value.cpp b/src/mongo/db/exec/sbe/values/value.cpp
index e21bc694784..a24883a7be9 100644
--- a/src/mongo/db/exec/sbe/values/value.cpp
+++ b/src/mongo/db/exec/sbe/values/value.cpp
@@ -348,9 +348,6 @@ void releaseValueDeep(TypeTags tag, Value val) noexcept {
case TypeTags::indexBounds:
delete getIndexBoundsView(val);
break;
- case TypeTags::classicMatchExpresion:
- delete getClassicMatchExpressionView(val);
- break;
default:
break;
}
diff --git a/src/mongo/db/exec/sbe/values/value.h b/src/mongo/db/exec/sbe/values/value.h
index 0459118ddf1..88cf3d3b011 100644
--- a/src/mongo/db/exec/sbe/values/value.h
+++ b/src/mongo/db/exec/sbe/values/value.h
@@ -191,9 +191,6 @@ enum class TypeTags : uint8_t {
// Pointer to a IndexBounds object.
indexBounds,
-
- // Pointer to a classic engine match expression.
- classicMatchExpresion,
};
inline constexpr bool isNumber(TypeTags tag) noexcept {
@@ -1261,10 +1258,6 @@ inline IndexBounds* getIndexBoundsView(Value val) noexcept {
return reinterpret_cast<IndexBounds*>(val);
}
-inline MatchExpression* getClassicMatchExpressionView(Value val) noexcept {
- return reinterpret_cast<MatchExpression*>(val);
-}
-
inline sbe::value::CsiCell* getCsiCellView(Value val) noexcept {
return reinterpret_cast<sbe::value::CsiCell*>(val);
}
@@ -1479,12 +1472,6 @@ inline std::pair<TypeTags, Value> copyValue(TypeTags tag, Value val) {
return makeCopyCollator(*getCollatorView(val));
case TypeTags::indexBounds:
return makeCopyIndexBounds(*getIndexBoundsView(val));
- case TypeTags::classicMatchExpresion:
- // Beware: "shallow cloning" a match expression does not copy the underlying BSON. The
- // original BSON must remain alive for both the original MatchExpression and the clone.
- return {TypeTags::classicMatchExpresion,
- bitcastFrom<const MatchExpression*>(
- getClassicMatchExpressionView(val)->shallowClone().release())};
default:
break;
}
diff --git a/src/mongo/db/exec/sbe/values/value_printer.cpp b/src/mongo/db/exec/sbe/values/value_printer.cpp
index 4cd70a9196b..2405f698f3f 100644
--- a/src/mongo/db/exec/sbe/values/value_printer.cpp
+++ b/src/mongo/db/exec/sbe/values/value_printer.cpp
@@ -163,9 +163,6 @@ void ValuePrinter<T>::writeTagToStream(TypeTags tag) {
case TypeTags::indexBounds:
stream << "indexBounds";
break;
- case TypeTags::classicMatchExpresion:
- stream << "classicMatchExpression";
- break;
case TypeTags::csiCell:
stream << "csiCell";
break;
@@ -539,9 +536,6 @@ void ValuePrinter<T>::writeValueToStream(TypeTags tag, Value val, size_t depth)
getIndexBoundsView(val)->toString(true /* hasNonSimpleCollation */));
stream << ")";
break;
- case TypeTags::classicMatchExpresion:
- stream << "ClassicMatcher(" << getClassicMatchExpressionView(val)->toString() << ")";
- break;
case TypeTags::csiCell:
stream << "CsiCell(" << getCsiCellView(val) << ")";
break;
diff --git a/src/mongo/db/exec/sbe/vm/vm.cpp b/src/mongo/db/exec/sbe/vm/vm.cpp
index 3a8e48daf0a..35dab757964 100644
--- a/src/mongo/db/exec/sbe/vm/vm.cpp
+++ b/src/mongo/db/exec/sbe/vm/vm.cpp
@@ -166,7 +166,6 @@ int Instruction::stackOffset[Instruction::Tags::lastInstruction] = {
-1, // fail
- 0, // applyClassicMatcher
0, // dateTruncImm
};
@@ -482,18 +481,6 @@ void CodeFragment::appendNumericConvert(value::TypeTags targetTag) {
adjustStackSimple(i);
}
-void CodeFragment::appendApplyClassicMatcher(const MatchExpression* matcher) {
- Instruction i;
- i.tag = Instruction::applyClassicMatcher;
-
- auto offset = allocateSpace(sizeof(Instruction) + sizeof(matcher));
-
- offset += writeToMemory(offset, i);
- offset += writeToMemory(offset, matcher);
-
- adjustStackSimple(i);
-}
-
void CodeFragment::appendSub(Instruction::Parameter lhs, Instruction::Parameter rhs) {
appendSimpleInstruction(Instruction::sub, lhs, rhs);
}
@@ -5750,28 +5737,6 @@ MONGO_COMPILER_NORETURN void ByteCode::runFailInstruction() {
uasserted(code, message);
}
-
-void ByteCode::runClassicMatcher(const MatchExpression* matcher) {
- auto [ownedObj, tagObj, valObj] = getFromStack(0);
-
- BSONObj bsonObjForMatching;
- if (tagObj == value::TypeTags::Object) {
- BSONObjBuilder builder;
- sbe::bson::convertToBsonObj(builder, sbe::value::getObjectView(valObj));
- bsonObjForMatching = builder.obj();
- } else if (tagObj == value::TypeTags::bsonObject) {
- auto bson = value::getRawPointerView(valObj);
- bsonObjForMatching = BSONObj(bson);
- } else {
- MONGO_UNREACHABLE_TASSERT(6681402);
- }
-
- bool res = matcher->matchesBSON(bsonObjForMatching);
- if (ownedObj) {
- value::releaseValue(tagObj, valObj);
- }
- topStack(false, value::TypeTags::Boolean, value::bitcastFrom<bool>(res));
-}
template <typename T>
void ByteCode::runTagCheck(const uint8_t*& pcPointer, T&& predicate) {
auto [popParam, offsetParam] = decodeParam(pcPointer);
@@ -6782,13 +6747,6 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) {
runFailInstruction();
break;
}
- case Instruction::applyClassicMatcher: {
- const auto* matcher = readFromMemory<const MatchExpression*>(pcPointer);
- pcPointer += sizeof(matcher);
-
- runClassicMatcher(matcher);
- break;
- }
case Instruction::dateTruncImm: {
auto unit = readFromMemory<TimeUnit>(pcPointer);
pcPointer += sizeof(unit);
diff --git a/src/mongo/db/exec/sbe/vm/vm.h b/src/mongo/db/exec/sbe/vm/vm.h
index b6ee6d2dc75..7034e1fabfe 100644
--- a/src/mongo/db/exec/sbe/vm/vm.h
+++ b/src/mongo/db/exec/sbe/vm/vm.h
@@ -349,8 +349,6 @@ struct Instruction {
fail,
- applyClassicMatcher, // Instruction which calls into the classic engine MatchExpression.
-
dateTruncImm,
lastInstruction // this is just a marker used to calculate number of instructions
@@ -577,8 +575,6 @@ struct Instruction {
return "allocStack";
case fail:
return "fail";
- case applyClassicMatcher:
- return "applyClassicMatcher";
case dateTruncImm:
return "dateTruncImm";
default:
@@ -890,7 +886,6 @@ public:
void appendAllocStack(uint32_t size);
void appendFail();
void appendNumericConvert(value::TypeTags targetTag);
- void appendApplyClassicMatcher(const MatchExpression*);
// For printing from an interactive debugger.
std::string toString() const;
@@ -999,7 +994,6 @@ private:
void runLambdaInternal(const CodeFragment* code, int64_t position);
MONGO_COMPILER_NORETURN void runFailInstruction();
- void runClassicMatcher(const MatchExpression* matcher);
template <typename T>
void runTagCheck(const uint8_t*& pcPointer, T&& predicate);
diff --git a/src/mongo/db/exec/sbe/vm/vm_printer.cpp b/src/mongo/db/exec/sbe/vm/vm_printer.cpp
index 8ac6fe532c2..85c59dc9957 100644
--- a/src/mongo/db/exec/sbe/vm/vm_printer.cpp
+++ b/src/mongo/db/exec/sbe/vm/vm_printer.cpp
@@ -49,10 +49,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Debug> {
return SlotAccessorFmt{accessor};
}
- auto matchExpression(const MatchExpression* matcher) {
- return MatchExpressionFmt{matcher};
- }
-
struct PcPointerFmt {
const uint8_t* pcPointer;
};
@@ -60,10 +56,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Debug> {
struct SlotAccessorFmt {
value::SlotAccessor* accessor;
};
-
- struct MatchExpressionFmt {
- const MatchExpression* matcher;
- };
};
template <typename charT, typename traits>
@@ -80,13 +72,6 @@ std::basic_ostream<charT, traits>& operator<<(
return os << static_cast<const void*>(a.accessor);
}
-template <typename charT, typename traits>
-std::basic_ostream<charT, traits>& operator<<(
- std::basic_ostream<charT, traits>& os,
- const CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Debug>::MatchExpressionFmt& a) {
- return os << static_cast<const void*>(a.matcher);
-}
-
template <>
struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable> {
CodeFragmentFormatter(const CodeFragment& code) : code(code) {}
@@ -99,10 +84,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable> {
return SlotAccessorFmt{accessor};
}
- auto matchExpression(const MatchExpression* matcher) {
- return MatchExpressionFmt{matcher};
- }
-
struct PcPointerFmt {
const uint8_t* pcPointer;
const uint8_t* pcBegin;
@@ -112,10 +93,6 @@ struct CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable> {
value::SlotAccessor* accessor;
};
- struct MatchExpressionFmt {
- const MatchExpression* matcher;
- };
-
const CodeFragment& code;
};
@@ -137,13 +114,6 @@ std::basic_ostream<charT, traits>& operator<<(
return os << "<accessor>";
}
-template <typename charT, typename traits>
-std::basic_ostream<charT, traits>& operator<<(
- std::basic_ostream<charT, traits>& os,
- const CodeFragmentFormatter<CodeFragmentPrinter::PrintFormat::Stable>::MatchExpressionFmt& a) {
- return os << "<matchExpression>";
-}
-
template <typename Formatter>
class CodeFragmentPrinterImpl {
public:
@@ -328,12 +298,6 @@ public:
os << "accessor: " << _formatter.slotAccessor(accessor);
break;
}
- case Instruction::applyClassicMatcher: {
- const auto* matcher = readFromMemory<const MatchExpression*>(pcPointer);
- pcPointer += sizeof(matcher);
- os << "matcher: " << _formatter.matchExpression(matcher);
- break;
- }
case Instruction::numConvert: {
auto tag = readFromMemory<value::TypeTags>(pcPointer);
pcPointer += sizeof(tag);
diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
index 1ce2581a1e7..471727946c4 100644
--- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
+++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp
@@ -569,22 +569,21 @@ std::vector<BSONObj> CommonMongodProcessInterface::getMatchingPlanCacheEntryStat
auto planCacheEntries =
planCache->getMatchingStats({} /* cacheKeyFilterFunc */, serializer, predicate);
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- // Retrieve plan cache entries from the SBE plan cache.
- const auto cacheKeyFilter = [uuid = collection->uuid(),
- collVersion = collQueryInfo.getPlanCacheInvalidatorVersion()](
- const sbe::PlanCacheKey& key) {
- // Only fetch plan cache entries with keys matching given UUID and collectionVersion.
- return uuid == key.getMainCollectionState().uuid &&
- collVersion == key.getMainCollectionState().version;
- };
-
- auto planCacheEntriesSBE =
- sbe::getPlanCache(opCtx).getMatchingStats(cacheKeyFilter, serializer, predicate);
-
- planCacheEntries.insert(
- planCacheEntries.end(), planCacheEntriesSBE.begin(), planCacheEntriesSBE.end());
- }
+ // Retrieve plan cache entries from the SBE plan cache.
+ const auto cacheKeyFilter = [uuid = collection->uuid(),
+ collVersion = collQueryInfo.getPlanCacheInvalidatorVersion()](
+ const sbe::PlanCacheKey& key) {
+ // Only fetch plan cache entries with keys matching given UUID and collectionVersion.
+ return uuid == key.getMainCollectionState().uuid &&
+ collVersion == key.getMainCollectionState().version;
+ };
+
+ auto planCacheEntriesSBE =
+ sbe::getPlanCache(opCtx).getMatchingStats(cacheKeyFilter, serializer, predicate);
+
+ planCacheEntries.insert(
+ planCacheEntries.end(), planCacheEntriesSBE.begin(), planCacheEntriesSBE.end());
+
return planCacheEntries;
}
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 516e33fe15a..45fc3ef9c41 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -208,8 +208,7 @@ Status CanonicalQuery::init(OperationContext* opCtx,
_root = MatchExpression::normalize(std::move(root));
// If caching is disabled, do not perform any autoparameterization.
- if (!internalQueryDisablePlanCache.load() &&
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
+ if (!internalQueryDisablePlanCache.load()) {
const bool hasNoTextNodes =
!QueryPlannerCommon::hasNode(_root.get(), MatchExpression::TEXT);
if (hasNoTextNodes) {
@@ -548,10 +547,8 @@ std::string CanonicalQuery::toStringShort() const {
}
CanonicalQuery::QueryShapeString CanonicalQuery::encodeKey() const {
- return (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV() && !_forceClassicEngine &&
- _sbeCompatible)
- ? canonical_query_encoder::encodeSBE(*this)
- : canonical_query_encoder::encode(*this);
+ return (!_forceClassicEngine && _sbeCompatible) ? canonical_query_encoder::encodeSBE(*this)
+ : canonical_query_encoder::encode(*this);
}
CanonicalQuery::QueryShapeString CanonicalQuery::encodeKeyForPlanCacheCommand() const {
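
With the feature-flag check removed above, the plan cache key encoding chosen by CanonicalQuery::encodeKey() depends only on engine selection and SBE compatibility. The standalone C++ sketch below mirrors that dispatch; encodeSBE/encodeClassic are hypothetical stand-ins for the real encoders in canonical_query_encoder and are shown only to illustrate the branch, not the server's API.

    #include <iostream>
    #include <string>

    // Hypothetical stand-ins for canonical_query_encoder::encodeSBE/encode.
    std::string encodeSBE() { return "sbe-key"; }
    std::string encodeClassic() { return "classic-key"; }

    // Mirrors the simplified condition: use the SBE key encoding whenever the
    // classic engine is not forced and the query is SBE compatible.
    std::string encodeKey(bool forceClassicEngine, bool sbeCompatible) {
        return (!forceClassicEngine && sbeCompatible) ? encodeSBE() : encodeClassic();
    }

    int main() {
        std::cout << encodeKey(false, true) << "\n";  // sbe-key
        std::cout << encodeKey(true, true) << "\n";   // classic-key
    }
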
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 93036c4feda..4313b6efc2b 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -44,7 +44,6 @@
#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/query/analyze_regex.h"
#include "mongo/db/query/projection.h"
-#include "mongo/db/query/query_feature_flags_gen.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/tree_walker.h"
#include "mongo/logv2/log.h"
@@ -1089,9 +1088,6 @@ void encodeKeyForAutoParameterizedMatchSBE(MatchExpression* matchExpr, BufBuilde
} // namespace
std::string encodeSBE(const CanonicalQuery& cq) {
- tassert(6512900,
- "using the SBE plan cache key encoding requires SBE to be fully enabled",
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV());
tassert(6142104,
"attempting to encode SBE plan cache key for SBE-incompatible query",
cq.isSbeCompatible());
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index 4987272cf80..12593f56490 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -427,9 +427,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
"trySbeEngine");
-
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
-
testComputeSBEKey(gctx, "{}", "{}", "{}");
testComputeSBEKey(gctx, "{$or: [{a: 1}, {b: 2}]}", "{}", "{}");
testComputeSBEKey(gctx, "{a: 1}", "{}", "{}");
@@ -502,7 +499,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) {
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
"trySbeEngine");
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
auto getLookupBson = [](StringData localField, StringData foreignField, StringData asField) {
return BSON("$lookup" << BSON("from" << foreignNss.coll() << "localField" << localField
@@ -532,7 +528,6 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithReadConcern) {
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
"trySbeEngine");
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
// Find command without read concern.
auto findCommand = std::make_unique<FindCommandRequest>(nss);
diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp
index a15a3b918b0..2fb5614fd16 100644
--- a/src/mongo/db/query/canonical_query_test.cpp
+++ b/src/mongo/db/query/canonical_query_test.cpp
@@ -456,7 +456,6 @@ TEST(CanonicalQueryTest, InvalidSortOrdersFailToCanonicalize) {
}
TEST(CanonicalQueryTest, DoNotParameterizeTextExpressions) {
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
auto cq =
canonicalize("{$text: {$search: \"Hello World!\"}}",
MatchExpressionParser::kDefaultSpecialFeatures | MatchExpressionParser::kText);
@@ -464,7 +463,6 @@ TEST(CanonicalQueryTest, DoNotParameterizeTextExpressions) {
}
TEST(CanonicalQueryTest, DoParameterizeRegularExpressions) {
- RAIIServerParameterControllerForTest controllerSBEPlanCache("featureFlagSbeFull", true);
auto cq = canonicalize("{a: 1, b: {$lt: 5}}");
ASSERT_TRUE(cq->isParameterized());
}
diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp
index 7789d894cb5..41874a76d8a 100644
--- a/src/mongo/db/query/classic_plan_cache.cpp
+++ b/src/mongo/db/query/classic_plan_cache.cpp
@@ -130,8 +130,7 @@ bool shouldCacheQuery(const CanonicalQuery& query) {
const MatchExpression* expr = query.root();
if (!query.getSortPattern() && expr->matchType() == MatchExpression::AND &&
- expr->numChildren() == 0 &&
- !(feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV() && query.isSbeCompatible())) {
+ expr->numChildren() == 0 && !query.isSbeCompatible()) {
return false;
}
diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp
index 545ec8553f4..b9b69eac18d 100644
--- a/src/mongo/db/query/explain.cpp
+++ b/src/mongo/db/query/explain.cpp
@@ -98,7 +98,6 @@ void generatePlannerInfo(PlanExecutor* exec,
const QuerySettings* querySettings =
QuerySettingsDecoration::get(mainCollection->getSharedDecorations());
if (exec->getCanonicalQuery()->isSbeCompatible() &&
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV() &&
!exec->getCanonicalQuery()->getForceClassicEngine()) {
const auto planCacheKeyInfo =
plan_cache_key_factory::make(*exec->getCanonicalQuery(), collections);
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index b3a28aef907..157095d941e 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -666,7 +666,7 @@ public:
_fromPlanCache = val;
}
- bool isRecoveredFromPlanCache() {
+ bool isRecoveredFromPlanCache() const {
return _fromPlanCache;
}
@@ -1148,64 +1148,25 @@ protected:
std::unique_ptr<SlotBasedPrepareExecutionResult> buildCachedPlan(
const sbe::PlanCacheKey& planCacheKey) final {
if (shouldCacheQuery(*_cq)) {
- if (!feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- return buildCachedPlanFromClassicCache();
- } else {
- getResult()->planCacheInfo().planCacheKey = planCacheKey.planCacheKeyHash();
-
- auto&& planCache = sbe::getPlanCache(_opCtx);
- auto cacheEntry = planCache.getCacheEntryIfActive(planCacheKey);
- if (!cacheEntry) {
- return nullptr;
- }
+ getResult()->planCacheInfo().planCacheKey = planCacheKey.planCacheKeyHash();
- auto&& cachedPlan = std::move(cacheEntry->cachedPlan);
- auto root = std::move(cachedPlan->root);
- auto stageData = std::move(cachedPlan->planStageData);
- stageData.debugInfo = cacheEntry->debugInfo;
-
- auto result = releaseResult();
- result->setDecisionWorks(cacheEntry->decisionWorks);
- result->setRecoveredPinnedCacheEntry(cacheEntry->isPinned());
- result->emplace(std::make_pair(std::move(root), std::move(stageData)));
- result->setRecoveredFromPlanCache(true);
- return result;
+ auto&& planCache = sbe::getPlanCache(_opCtx);
+ auto cacheEntry = planCache.getCacheEntryIfActive(planCacheKey);
+ if (!cacheEntry) {
+ return nullptr;
}
- }
- return nullptr;
- }
+ auto&& cachedPlan = std::move(cacheEntry->cachedPlan);
+ auto root = std::move(cachedPlan->root);
+ auto stageData = std::move(cachedPlan->planStageData);
+ stageData.debugInfo = cacheEntry->debugInfo;
- // A temporary function to allow recovering SBE plans from the classic plan cache. When the
- // feature flag for "SBE full" is disabled, we are still able to use the classic plan cache for
- // queries that execute in SBE.
- //
- // TODO SERVER-64882: Remove this function when "featureFlagSbeFull" is removed.
- std::unique_ptr<SlotBasedPrepareExecutionResult> buildCachedPlanFromClassicCache() {
- const auto& mainColl = getMainCollection();
- auto planCacheKey = plan_cache_key_factory::make<PlanCacheKey>(*_cq, mainColl);
- getResult()->planCacheInfo().planCacheKey = planCacheKey.planCacheKeyHash();
-
- // Try to look up a cached solution for the query.
- if (auto cs = CollectionQueryInfo::get(mainColl).getPlanCache()->getCacheEntryIfActive(
- planCacheKey)) {
- initializePlannerParamsIfNeeded();
- // We have a CachedSolution. Have the planner turn it into a QuerySolution.
- auto statusWithQs = QueryPlanner::planFromCache(*_cq, _plannerParams, *cs);
-
- if (statusWithQs.isOK()) {
- auto querySolution = std::move(statusWithQs.getValue());
- if (_cq->isCountLike() && turnIxscanIntoCount(querySolution.get())) {
- LOGV2_DEBUG(
- 20923, 2, "Using fast count", "query"_attr = redact(_cq->toStringShort()));
- }
-
- auto result = releaseResult();
- addSolutionToResult(result.get(), std::move(querySolution));
- result->setDecisionWorks(cs->decisionWorks);
- result->setRecoveredFromPlanCache(true);
- return result;
- }
+ auto result = releaseResult();
+ result->setDecisionWorks(cacheEntry->decisionWorks);
+ result->setRecoveredPinnedCacheEntry(cacheEntry->isPinned());
+ result->emplace(std::make_pair(std::move(root), std::move(stageData)));
+ result->setRecoveredFromPlanCache(true);
+ return result;
}
return nullptr;
@@ -1429,56 +1390,52 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
}
/**
- * Checks if the result of query planning is SBE compatible.
+ * Checks if the result of query planning is SBE compatible. In this function, 'sbeFull' indicates
+ * whether the full set of features supported by SBE is enabled, while 'canUseRegularSbe' indicates
+ * whether the query is compatible with the subset of SBE enabled by default.
*/
bool shouldPlanningResultUseSbe(bool sbeFull,
+ bool canUseRegularSbe,
bool columnIndexPresent,
- bool aggSpecificStagesPushedDown,
const SlotBasedPrepareExecutionResult& planningResult) {
+ // If we have an entry in the SBE plan cache, then we can use SBE.
+ if (planningResult.isRecoveredFromPlanCache()) {
+ return true;
+ }
+
// For now this function assumes one of these is true. If all are false, we should not use
// SBE.
tassert(6164401,
- "Expected sbeFull, or a CSI present, or agg specific stages pushed down",
- sbeFull || columnIndexPresent || aggSpecificStagesPushedDown);
+            "Expected sbeFull, or a regular SBE compatible query, or a CSI present",
+ sbeFull || canUseRegularSbe || columnIndexPresent);
const auto& solutions = planningResult.solutions();
if (solutions.empty()) {
// Query needs subplanning (plans are generated later, we don't have access yet).
invariant(planningResult.needsSubplanning());
- // TODO: SERVER-71798 if the below conditions are not met, a column index will not be used
- // even if it could be.
- return sbeFull || aggSpecificStagesPushedDown;
+        // Use SBE for rooted $or queries if SBE is fully enabled or if the query falls within the
+        // subset of SBE that is enabled by default.
+ return sbeFull || canUseRegularSbe;
}
// Check that the query solution is SBE compatible.
const bool allStagesCompatible =
std::all_of(solutions.begin(), solutions.end(), [](const auto& solution) {
- return solution->root() ==
- nullptr /* we won't have a query solution if we pulled it from the cache */
- || isQueryPlanSbeCompatible(solution.get());
+            // We must have a solution; otherwise, we would have returned early above.
+ invariant(solution->root());
+ return isQueryPlanSbeCompatible(solution.get());
});
if (!allStagesCompatible) {
return false;
}
- if (sbeFull || aggSpecificStagesPushedDown) {
+ if (sbeFull || canUseRegularSbe) {
return true;
}
- // If no pipeline is pushed down and SBE full is off, the only other case we'll use SBE for
- // is when a column index plan was constructed.
- tassert(6164400, "Expected CSI to be present", columnIndexPresent);
-
- // The only time a query solution is not available is when the plan comes from the SBE plan
- // cache. The plan cache is gated by sbeFull, which was already checked earlier. So, at this
- // point we're guaranteed sbeFull is off, and this further implies that the returned plan(s)
- // did not come from the cache.
- tassert(6164402,
- "Did not expect a plan from the plan cache",
- !sbeFull && solutions.front()->root());
-
+ // Return true if we have a column scan plan, and false otherwise.
return solutions.size() == 1 &&
solutions.front()->root()->hasNode(StageType::STAGE_COLUMN_SCAN);
}
@@ -1518,6 +1475,38 @@ bool maybeQueryIsColumnScanEligible(OperationContext* opCtx,
}
/**
+ * Function which returns true if 'cq' uses features that are currently supported in SBE without
+ * 'featureFlagSbeFull' being set; false otherwise.
+ */
+bool shouldUseRegularSbe(const CanonicalQuery& cq) {
+ const auto* proj = cq.getProj();
+
+ // Disallow projections which use expressions.
+ if (proj && proj->hasExpressions()) {
+ return false;
+ }
+
+ // Disallow projections which have dotted paths.
+ if (proj && proj->hasDottedPaths()) {
+ return false;
+ }
+
+ // Disallow filters which feature $expr.
+ if (cq.countNodes(cq.root(), MatchExpression::MatchType::EXPRESSION) > 0) {
+ return false;
+ }
+
+ const auto& sortPattern = cq.getSortPattern();
+
+ // Disallow sorts which have a common prefix.
+ if (sortPattern && sortPatternHasPartsWithCommonPrefix(*sortPattern)) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
* Attempts to create a slot-based executor for the query, if the query plan is eligible for SBE
* execution. This function has three possible return values:
*
@@ -1543,18 +1532,20 @@ attemptToGetSlotBasedExecutor(
}
const bool sbeFull = feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV();
- const bool aggSpecificStagesPushedDown = !canonicalQuery->pipeline().empty();
+ const bool canUseRegularSbe = shouldUseRegularSbe(*canonicalQuery);
- // Attempt to use SBE if we find any $group/$lookup stages eligible for execution in SBE, if the
- // query may be eligible for column scan, or if SBE is fully enabled. Otherwise, fallback to the
- // classic engine right away.
- if (aggSpecificStagesPushedDown || sbeFull ||
+    // Attempt to use SBE if the query may be eligible for column scan, if the query falls within
+    // the subset of SBE that is enabled by default, or if SBE is fully enabled. Otherwise, fall
+    // back to the classic engine right away.
+ if (sbeFull || canUseRegularSbe ||
maybeQueryIsColumnScanEligible(opCtx, collections, canonicalQuery.get())) {
+ // Create the SBE prepare execution helper and initialize the params for the planner. Our
+ // decision about using SBE will depend on whether there is a column index present.
+
auto sbeYieldPolicy = makeSbeYieldPolicy(
opCtx, yieldPolicy, &collections.getMainCollection(), canonicalQuery->nss());
SlotBasedPrepareExecutionHelper helper{
opCtx, collections, canonicalQuery.get(), sbeYieldPolicy.get(), plannerParams.options};
-
auto planningResultWithStatus = helper.prepare();
if (!planningResultWithStatus.isOK()) {
return planningResultWithStatus.getStatus();
@@ -1563,10 +1554,8 @@ attemptToGetSlotBasedExecutor(
const bool csiPresent =
helper.plannerParams() && !helper.plannerParams()->columnStoreIndexes.empty();
- if (shouldPlanningResultUseSbe(sbeFull,
- csiPresent,
- aggSpecificStagesPushedDown,
- *planningResultWithStatus.getValue())) {
+ if (shouldPlanningResultUseSbe(
+ sbeFull, canUseRegularSbe, csiPresent, *planningResultWithStatus.getValue())) {
if (extractAndAttachPipelineStages) {
// We know now that we will use SBE, so we need to remove the pushed-down stages
// from the original pipeline object.
@@ -1646,6 +1635,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
std::move(stdx::get<std::unique_ptr<CanonicalQuery>>(maybeExecutor));
}
}
+        // Ensure that 'sbeCompatible' is set accordingly, since the query will now run in the
+        // classic engine.
+ canonicalQuery->setSbeCompatible(false);
return getClassicExecutor(
opCtx, mainColl, std::move(canonicalQuery), yieldPolicy, plannerParams);
}();
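
The core of the change in get_executor.cpp is the new shouldUseRegularSbe() helper and the reworked gate in attemptToGetSlotBasedExecutor(): without 'featureFlagSbeFull', a query may still use SBE as long as its projection has no expressions or dotted paths, its filter has no $expr, and its sort has no parts sharing a common prefix (for example {a: 1, "a.b": 1}). The sketch below is a self-contained illustration of that decision with boolean inputs standing in for the real CanonicalQuery inspection; it is not the server's API.

    #include <iostream>

    // Simplified stand-in for shouldUseRegularSbe(): each flag corresponds to
    // one of the disqualifying features listed in the patch.
    bool shouldUseRegularSbe(bool projHasExpressions,
                             bool projHasDottedPaths,
                             bool filterHasExpr,
                             bool sortHasCommonPrefix) {
        return !projHasExpressions && !projHasDottedPaths && !filterHasExpr &&
            !sortHasCommonPrefix;
    }

    // Simplified stand-in for the top-level gate: attempt SBE planning only if
    // at least one of the three conditions holds; otherwise use classic.
    bool shouldAttemptSbe(bool sbeFull, bool canUseRegularSbe, bool maybeColumnScanEligible) {
        return sbeFull || canUseRegularSbe || maybeColumnScanEligible;
    }

    int main() {
        // A plain find on {a: 1}: eligible for the default SBE subset.
        bool regular = shouldUseRegularSbe(false, false, false, false);
        std::cout << shouldAttemptSbe(false, regular, false) << "\n";  // 1

        // A query sorting on {a: 1, "a.b": 1}: excluded from the default
        // subset, and with no column index or sbeFull it falls back to classic.
        regular = shouldUseRegularSbe(false, false, false, true);
        std::cout << shouldAttemptSbe(false, regular, false) << "\n";  // 0
    }
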
diff --git a/src/mongo/db/query/projection.h b/src/mongo/db/query/projection.h
index c093083c068..f920a778e9a 100644
--- a/src/mongo/db/query/projection.h
+++ b/src/mongo/db/query/projection.h
@@ -118,6 +118,13 @@ public:
*/
bool isFieldRetainedExactly(StringData path) const;
+
+ /**
+ * Returns true if this projection has any dotted paths; false otherwise.
+ */
+ bool hasDottedPaths() const {
+ return _deps.hasDottedPath;
+ }
/**
* A projection is considered "simple" if it operates only on top-level fields,
* has no positional projection or expressions, and doesn't require metadata.
diff --git a/src/mongo/db/query/query_feature_flags.idl b/src/mongo/db/query/query_feature_flags.idl
index 55a3a24c408..d0289f1f761 100644
--- a/src/mongo/db/query/query_feature_flags.idl
+++ b/src/mongo/db/query/query_feature_flags.idl
@@ -77,8 +77,7 @@ feature_flags:
default: false
featureFlagSbeFull:
- description: "Feature flag for enabling full SBE support. Enables SBE for a much larger class
- of queries, including NLJ $lookup plans. Also enables the SBE plan cache."
+    description: "Feature flag for enabling SBE for a much larger class of queries than is enabled by default"
cpp_varname: gFeatureFlagSbeFull
default: false
diff --git a/src/mongo/db/query/query_utils.cpp b/src/mongo/db/query/query_utils.cpp
index 917817c739e..71dd2acded6 100644
--- a/src/mongo/db/query/query_utils.cpp
+++ b/src/mongo/db/query/query_utils.cpp
@@ -34,6 +34,21 @@
namespace mongo {
+bool sortPatternHasPartsWithCommonPrefix(const SortPattern& sortPattern) {
+ StringDataSet prefixSet;
+ for (const auto& part : sortPattern) {
+ // Ignore any $meta sorts that may be present.
+ if (!part.fieldPath) {
+ continue;
+ }
+ auto [_, inserted] = prefixSet.insert(part.fieldPath->getFieldName(0));
+ if (!inserted) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery& query) {
const auto& findCommand = query.getFindCommandRequest();
return !findCommand.getShowRecordId() && findCommand.getHint().isEmpty() &&
@@ -66,10 +81,11 @@ bool isQuerySbeCompatible(const CollectionPtr* collection, const CanonicalQuery*
const bool isQueryNotAgainstClusteredCollection =
!(collection->get() && collection->get()->isClustered());
- const bool doesNotRequireMatchDetails =
- !cq->getProj() || !cq->getProj()->requiresMatchDetails();
+ const auto* proj = cq->getProj();
+
+ const bool doesNotRequireMatchDetails = !proj || !proj->requiresMatchDetails();
- const bool doesNotHaveElemMatchProject = !cq->getProj() || !cq->getProj()->containsElemMatch();
+ const bool doesNotHaveElemMatchProject = !proj || !proj->containsElemMatch();
const bool isNotInnerSideOfLookup = !(expCtx && expCtx->inLookup);
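
The newly added sortPatternHasPartsWithCommonPrefix() treats two sort parts as conflicting when they begin with the same top-level field: {a: 1, "a.b": 1} shares the prefix "a", while {a: 1, b: 1} does not. Below is a minimal standalone sketch of that check, assuming a sort pattern is just a list of dotted field paths (the server's SortPattern type is richer and also skips $meta parts).

    #include <iostream>
    #include <string>
    #include <unordered_set>
    #include <vector>

    bool hasPartsWithCommonPrefix(const std::vector<std::string>& sortPaths) {
        std::unordered_set<std::string> prefixes;
        for (const auto& path : sortPaths) {
            // The prefix is the first dotted component, e.g. "a" for "a.b.c".
            const std::string prefix = path.substr(0, path.find('.'));
            if (!prefixes.insert(prefix).second) {
                return true;  // Two sort parts share a leading field.
            }
        }
        return false;
    }

    int main() {
        std::cout << hasPartsWithCommonPrefix({"a", "b"}) << "\n";      // 0
        std::cout << hasPartsWithCommonPrefix({"a.b", "a.c"}) << "\n";  // 1
    }
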
diff --git a/src/mongo/db/query/query_utils.h b/src/mongo/db/query/query_utils.h
index 97165860da1..55a5e069ad3 100644
--- a/src/mongo/db/query/query_utils.h
+++ b/src/mongo/db/query/query_utils.h
@@ -34,6 +34,12 @@
namespace mongo {
/**
+ * Returns 'true' if 'sortPattern' contains any sort pattern parts that share a common prefix, false
+ * otherwise.
+ */
+bool sortPatternHasPartsWithCommonPrefix(const SortPattern& sortPattern);
+
+/**
* Returns 'true' if 'query' on the given 'collection' can be answered using a special IDHACK plan.
*/
bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery& query);
diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp
index 5927bf5722c..5fbe8be2ec3 100644
--- a/src/mongo/db/query/sbe_cached_solution_planner.cpp
+++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp
@@ -52,45 +52,24 @@ CandidatePlans CachedSolutionPlanner::plan(
std::vector<std::unique_ptr<QuerySolution>> solutions,
std::vector<std::pair<std::unique_ptr<PlanStage>, stage_builder::PlanStageData>> roots) {
if (!_cq.pipeline().empty()) {
- // When "featureFlagSbeFull" is enabled we use the SBE plan cache. If the plan cache is
- // enabled we'd like to check if there is any foreign collection in the hash_lookup stage
- // that is no longer eligible for it. In this case we invalidate the cache and immediately
- // replan without ever running a trial period.
+        // Check whether any foreign collection referenced by a hash_lookup stage is no longer
+        // eligible for a hash join plan. If so, we invalidate the cache entry and immediately
+        // replan without ever running a trial period.
auto secondaryCollectionsInfo =
fillOutSecondaryCollectionsInformation(_opCtx, _collections, &_cq);
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- for (const auto& foreignCollection : roots[0].second.foreignHashJoinCollections) {
- const auto collectionInfo = secondaryCollectionsInfo.find(foreignCollection);
- tassert(6693500,
- "Foreign collection must be present in the collections info",
- collectionInfo != secondaryCollectionsInfo.end());
- tassert(6693501, "Foreign collection must exist", collectionInfo->second.exists);
+ for (const auto& foreignCollection : roots[0].second.foreignHashJoinCollections) {
+ const auto collectionInfo = secondaryCollectionsInfo.find(foreignCollection);
+ tassert(6693500,
+ "Foreign collection must be present in the collections info",
+ collectionInfo != secondaryCollectionsInfo.end());
+ tassert(6693501, "Foreign collection must exist", collectionInfo->second.exists);
- if (!QueryPlannerAnalysis::isEligibleForHashJoin(collectionInfo->second)) {
- return replan(/* shouldCache */ true,
- str::stream() << "Foreign collection " << foreignCollection
- << " is not eligible for hash join anymore");
- }
+ if (!QueryPlannerAnalysis::isEligibleForHashJoin(collectionInfo->second)) {
+ return replan(/* shouldCache */ true,
+ str::stream() << "Foreign collection " << foreignCollection
+ << " is not eligible for hash join anymore");
}
- } else {
- // The SBE plan cache is not enabled. If the cached plan is accepted we'd like to keep
- // the results from the trials even if there are parts of agg pipelines being lowered
- // into SBE, so we run the trial with the extended plan. This works because
- // TrialRunTracker, attached to HashAgg stage in $group queries, tracks as "results" the
- // results of its child stage. For $lookup queries, the TrialRunTracker will only track
- // the number of reads from the local side. Thus, we can use the number of reads the
- // plan was cached with during multiplanning even though multiplanning ran trials of
- // pre-extended plans.
- //
- // The SBE plan cache stores the entire plan, including the part for any agg pipeline
- // pushed down to SBE. Therefore, this logic is only necessary when "featureFlagSbeFull"
- // is disabled.
- _yieldPolicy->clearRegisteredPlans();
- solutions[0] = QueryPlanner::extendWithAggPipeline(
- _cq, std::move(solutions[0]), secondaryCollectionsInfo);
- roots[0] = stage_builder::buildSlotBasedExecutableTree(
- _opCtx, _collections, _cq, *solutions[0], _yieldPolicy);
}
}
// If the '_decisionReads' is not present then we do not run a trial period, keeping the current
@@ -227,18 +206,9 @@ CandidatePlans CachedSolutionPlanner::replan(bool shouldCache, std::string reaso
_yieldPolicy->clearRegisteredPlans();
if (shouldCache) {
- const auto& mainColl = _collections.getMainCollection();
// Deactivate the current cache entry.
- //
- // TODO SERVER-64882: We currently deactivate cache entries in both the classic and SBE plan
- // caches. Once we always use the SBE plan cache for queries eligible for SBE, this code can
- // be simplified to only deactivate the entry in the SBE plan cache.
- auto cache = CollectionQueryInfo::get(mainColl).getPlanCache();
- cache->deactivate(plan_cache_key_factory::make<mongo::PlanCacheKey>(_cq, mainColl));
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto&& sbePlanCache = sbe::getPlanCache(_opCtx);
- sbePlanCache.deactivate(plan_cache_key_factory::make(_cq, _collections));
- }
+ auto&& sbePlanCache = sbe::getPlanCache(_opCtx);
+ sbePlanCache.deactivate(plan_cache_key_factory::make(_cq, _collections));
}
auto buildExecutableTree = [&](const QuerySolution& sol) {
diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp
index 1498fa28932..2129554a2d9 100644
--- a/src/mongo/db/query/sbe_plan_cache.cpp
+++ b/src/mongo/db/query/sbe_plan_cache.cpp
@@ -48,27 +48,23 @@ const auto sbePlanCacheDecoration =
class PlanCacheOnParamChangeUpdaterImpl final : public plan_cache_util::OnParamChangeUpdater {
public:
void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto newSizeBytes = memory_util::getRequestedMemSizeInBytes(memSize);
- auto cappedCacheSize = memory_util::capMemorySize(newSizeBytes /*requestedSizeBytes*/,
- 500 /*maximumSizeGB*/,
- 25 /*percentTotalSystemMemory*/);
- if (cappedCacheSize < newSizeBytes) {
- LOGV2_DEBUG(6007001,
- 1,
- "The plan cache size has been capped",
- "cappedSize"_attr = cappedCacheSize);
- }
- auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache->reset(cappedCacheSize);
+ auto newSizeBytes = memory_util::getRequestedMemSizeInBytes(memSize);
+ auto cappedCacheSize = memory_util::capMemorySize(newSizeBytes /*requestedSizeBytes*/,
+ 500 /*maximumSizeGB*/,
+ 25 /*percentTotalSystemMemory*/);
+ if (cappedCacheSize < newSizeBytes) {
+ LOGV2_DEBUG(6007001,
+ 1,
+ "The plan cache size has been capped",
+ "cappedSize"_attr = cappedCacheSize);
}
+ auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
+ globalPlanCache->reset(cappedCacheSize);
}
void clearCache(ServiceContext* serviceCtx) final {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache->clear();
- }
+ auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
+ globalPlanCache->clear();
}
};
@@ -77,38 +73,29 @@ ServiceContext::ConstructorActionRegisterer planCacheRegisterer{
plan_cache_util::sbePlanCacheOnParamChangeUpdater(serviceCtx) =
std::make_unique<PlanCacheOnParamChangeUpdaterImpl>();
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto status = memory_util::MemorySize::parse(planCacheSize.get());
- uassertStatusOK(status);
- auto size = memory_util::getRequestedMemSizeInBytes(status.getValue());
- auto cappedCacheSize = memory_util::capMemorySize(size /*requestedSizeBytes*/,
- 500 /*maximumSizeGB*/,
- 25 /*percentTotalSystemMemory*/);
- if (cappedCacheSize < size) {
- LOGV2_DEBUG(6007000,
- 1,
- "The plan cache size has been capped",
- "cappedSize"_attr = cappedCacheSize);
- }
- auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
- globalPlanCache =
- std::make_unique<sbe::PlanCache>(cappedCacheSize, ProcessInfo::getNumCores());
+ auto status = memory_util::MemorySize::parse(planCacheSize.get());
+ uassertStatusOK(status);
+ auto size = memory_util::getRequestedMemSizeInBytes(status.getValue());
+ auto cappedCacheSize = memory_util::capMemorySize(
+ size /*requestedSizeBytes*/, 500 /*maximumSizeGB*/, 25 /*percentTotalSystemMemory*/);
+ if (cappedCacheSize < size) {
+ LOGV2_DEBUG(6007000,
+ 1,
+ "The plan cache size has been capped",
+ "cappedSize"_attr = cappedCacheSize);
}
+ auto& globalPlanCache = sbePlanCacheDecoration(serviceCtx);
+ globalPlanCache =
+ std::make_unique<sbe::PlanCache>(cappedCacheSize, ProcessInfo::getNumCores());
}};
} // namespace
sbe::PlanCache& getPlanCache(ServiceContext* serviceCtx) {
- uassert(5933402,
- "Cannot getPlanCache() if 'featureFlagSbeFull' is disabled",
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV());
return *sbePlanCacheDecoration(serviceCtx);
}
sbe::PlanCache& getPlanCache(OperationContext* opCtx) {
- uassert(5933401,
- "Cannot getPlanCache() if 'featureFlagSbeFull' is disabled",
- feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV());
tassert(5933400, "Cannot get the global SBE plan cache by a nullptr", opCtx);
return getPlanCache(opCtx->getServiceContext());
}
@@ -117,32 +104,29 @@ void clearPlanCacheEntriesWith(ServiceContext* serviceCtx,
UUID collectionUuid,
size_t collectionVersion,
bool matchSecondaryCollections) {
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- auto removed =
- sbe::getPlanCache(serviceCtx)
- .removeIf([&collectionUuid, collectionVersion, matchSecondaryCollections](
- const PlanCacheKey& key, const sbe::PlanCacheEntry& entry) {
- if (key.getMainCollectionState().version == collectionVersion &&
- key.getMainCollectionState().uuid == collectionUuid) {
- return true;
- }
- if (matchSecondaryCollections) {
- for (auto& collectionState : key.getSecondaryCollectionStates()) {
- if (collectionState.version == collectionVersion &&
- collectionState.uuid == collectionUuid) {
- return true;
- }
- }
- }
- return false;
- });
-
- LOGV2_DEBUG(6006600,
- 1,
- "Clearing SBE Plan Cache",
- "collectionUuid"_attr = collectionUuid,
- "collectionVersion"_attr = collectionVersion,
- "removedEntries"_attr = removed);
- }
+ auto removed = sbe::getPlanCache(serviceCtx)
+ .removeIf([&collectionUuid, collectionVersion, matchSecondaryCollections](
+ const PlanCacheKey& key, const sbe::PlanCacheEntry& entry) {
+ if (key.getMainCollectionState().version == collectionVersion &&
+ key.getMainCollectionState().uuid == collectionUuid) {
+ return true;
+ }
+ if (matchSecondaryCollections) {
+ for (auto& collectionState : key.getSecondaryCollectionStates()) {
+ if (collectionState.version == collectionVersion &&
+ collectionState.uuid == collectionUuid) {
+ return true;
+ }
+ }
+ }
+ return false;
+ });
+
+ LOGV2_DEBUG(6006600,
+ 1,
+ "Clearing SBE Plan Cache",
+ "collectionUuid"_attr = collectionUuid,
+ "collectionVersion"_attr = collectionVersion,
+ "removedEntries"_attr = removed);
}
} // namespace mongo::sbe
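
clearPlanCacheEntriesWith() now always walks the SBE plan cache and evicts every entry whose key references the target collection at the given plan-cache invalidator version, optionally matching secondary ($lookup) collections as well. A self-contained sketch of that predicate follows, with simplified integer stand-ins for the UUID and version fields carried by sbe::PlanCacheKey.

    #include <iostream>
    #include <vector>

    struct CollState {
        int uuid;
        int version;
    };

    // Mirrors the removeIf predicate: evict if the main collection matches, or
    // (when requested) if any secondary collection matches.
    bool shouldEvict(const CollState& main,
                     const std::vector<CollState>& secondaries,
                     int targetUuid,
                     int targetVersion,
                     bool matchSecondaryCollections) {
        if (main.uuid == targetUuid && main.version == targetVersion) {
            return true;
        }
        if (matchSecondaryCollections) {
            for (const auto& s : secondaries) {
                if (s.uuid == targetUuid && s.version == targetVersion) {
                    return true;
                }
            }
        }
        return false;
    }

    int main() {
        std::cout << shouldEvict({1, 7}, {}, 1, 7, false) << "\n";        // 1
        std::cout << shouldEvict({1, 7}, {{2, 3}}, 2, 3, false) << "\n";  // 0
        std::cout << shouldEvict({1, 7}, {{2, 3}}, 2, 3, true) << "\n";   // 1
    }
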
diff --git a/src/mongo/db/query/sbe_stage_builder.cpp b/src/mongo/db/query/sbe_stage_builder.cpp
index 9c354a469ae..8fe87acf72b 100644
--- a/src/mongo/db/query/sbe_stage_builder.cpp
+++ b/src/mongo/db/query/sbe_stage_builder.cpp
@@ -60,6 +60,8 @@
#include "mongo/db/query/bind_input_params.h"
#include "mongo/db/query/expression_walker.h"
#include "mongo/db/query/index_bounds_builder.h"
+#include "mongo/db/query/optimizer/rewrites/const_eval.h"
+#include "mongo/db/query/query_utils.h"
#include "mongo/db/query/sbe_stage_builder_abt_helpers.h"
#include "mongo/db/query/sbe_stage_builder_accumulator.h"
#include "mongo/db/query/sbe_stage_builder_coll_scan.h"
@@ -1215,19 +1217,13 @@ std::pair<std::unique_ptr<sbe::PlanStage>, PlanStageSlots> SlotBasedStageBuilder
return buildSortCovered(root, reqs);
}
- StringDataSet prefixSet;
- bool hasPartsWithCommonPrefix = false;
+ // getExecutor() should never call into buildSlotBasedExecutableTree() when the query
+ // contains $meta, so this assertion should always be true.
for (const auto& part : sortPattern) {
- // getExecutor() should never call into buildSlotBasedExecutableTree() when the query
- // contains $meta, so this assertion should always be true.
tassert(5037002, "Sort with $meta is not supported in SBE", part.fieldPath);
-
- if (!hasPartsWithCommonPrefix) {
- auto [_, prefixWasNotPresent] = prefixSet.insert(part.fieldPath->getFieldName(0));
- hasPartsWithCommonPrefix = !prefixWasNotPresent;
- }
}
+ const bool hasPartsWithCommonPrefix = sortPatternHasPartsWithCommonPrefix(sortPattern);
auto fields = reqs.getFields();
if (!hasPartsWithCommonPrefix) {
diff --git a/src/mongo/db/query/sbe_stage_builder_filter.cpp b/src/mongo/db/query/sbe_stage_builder_filter.cpp
index 57210c4b0c8..ffc9f38260b 100644
--- a/src/mongo/db/query/sbe_stage_builder_filter.cpp
+++ b/src/mongo/db/query/sbe_stage_builder_filter.cpp
@@ -1194,37 +1194,6 @@ public:
private:
MatchExpressionVisitorContext* _context;
};
-
-EvalExpr applyClassicMatcher(const MatchExpression* root,
- EvalExpr inputExpr,
- StageBuilderState& state) {
- return makeFunction("applyClassicMatcher",
- makeConstant(sbe::value::TypeTags::classicMatchExpresion,
- sbe::value::bitcastFrom<const MatchExpression*>(
- root->shallowClone().release())),
- inputExpr.extractExpr(state));
-}
-
-EvalExpr applyClassicMatcherOverIndexScan(const MatchExpression* root,
- const PlanStageSlots* slots,
- const std::vector<std::string>& keyFields) {
- BSONObjBuilder keyPatternBuilder;
- auto keySlots = sbe::makeSV();
- for (const auto& field : keyFields) {
- keyPatternBuilder.append(field, 1);
- keySlots.emplace_back(
- slots->get(std::make_pair(PlanStageSlots::kField, StringData(field))));
- }
-
- auto keyPatternTree = buildKeyPatternTree(keyPatternBuilder.obj(), keySlots);
- auto mkObjExpr = buildNewObjExpr(keyPatternTree.get());
-
- return makeFunction("applyClassicMatcher",
- makeConstant(sbe::value::TypeTags::classicMatchExpresion,
- sbe::value::bitcastFrom<const MatchExpression*>(
- root->shallowClone().release())),
- std::move(mkObjExpr));
-}
} // namespace
EvalExpr generateFilter(StageBuilderState& state,
@@ -1239,18 +1208,6 @@ EvalExpr generateFilter(StageBuilderState& state,
return EvalExpr{};
}
- // We only use the classic matcher path (aka "franken matcher") when SBE is not fully enabled.
- // Fully enabling SBE turns on the SBE plan cache, and embedding the classic matcher into the
- // query execution tree is not compatible with the plan cache's use of auto-parameterization.
- // This is because when embedding the classic matcher all of the constants used in the filter
- // are in the MatchExpression itself rather than in slots.
- if (!feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- tassert(7097207, "Expected input slot to be defined", rootSlot || isFilterOverIxscan);
-
- return isFilterOverIxscan ? applyClassicMatcherOverIndexScan(root, slots, keyFields)
- : applyClassicMatcher(root, toEvalExpr(rootSlot), state);
- }
-
MatchExpressionVisitorContext context{state, rootSlot, root, slots, isFilterOverIxscan};
MatchExpressionPreVisitor preVisitor{&context};
diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp
index 385a2b91b67..61dde7e5a32 100644
--- a/src/mongo/db/s/collection_sharding_runtime.cpp
+++ b/src/mongo/db/s/collection_sharding_runtime.cpp
@@ -647,52 +647,48 @@ void CollectionShardingRuntime::_cleanupBeforeInstallingNewCollectionMetadata(
return;
}
- if (feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
- const auto oldUUID = _metadataManager->getCollectionUuid();
- const auto oldShardVersion = _metadataManager->getActiveShardVersion();
- ExecutorFuture<void>{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()}
- .then([svcCtx{opCtx->getServiceContext()}, oldUUID, oldShardVersion] {
- ThreadClient tc{"CleanUpShardedMetadata", svcCtx};
- {
- stdx::lock_guard<Client> lk{*tc.get()};
- tc->setSystemOperationKillableByStepdown(lk);
- }
- auto uniqueOpCtx{tc->makeOperationContext()};
- auto opCtx{uniqueOpCtx.get()};
-
- try {
- auto& planCache = sbe::getPlanCache(opCtx);
- planCache.removeIf([&](const sbe::PlanCacheKey& key,
- const sbe::PlanCacheEntry& entry) -> bool {
- const auto matchingCollState =
- [&](const sbe::PlanCacheKeyCollectionState& entryCollState) {
- return entryCollState.uuid == oldUUID &&
- entryCollState.shardVersion &&
- entryCollState.shardVersion->epoch == oldShardVersion.epoch() &&
- entryCollState.shardVersion->ts ==
- oldShardVersion.getTimestamp();
- };
-
- // Check whether the main collection of this plan is the one being removed
- if (matchingCollState(key.getMainCollectionState()))
+ const auto oldUUID = _metadataManager->getCollectionUuid();
+ const auto oldShardVersion = _metadataManager->getActiveShardVersion();
+ ExecutorFuture<void>{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()}
+ .then([svcCtx{opCtx->getServiceContext()}, oldUUID, oldShardVersion] {
+ ThreadClient tc{"CleanUpShardedMetadata", svcCtx};
+ {
+ stdx::lock_guard<Client> lk{*tc.get()};
+ tc->setSystemOperationKillableByStepdown(lk);
+ }
+ auto uniqueOpCtx{tc->makeOperationContext()};
+ auto opCtx{uniqueOpCtx.get()};
+
+ try {
+ auto& planCache = sbe::getPlanCache(opCtx);
+ planCache.removeIf([&](const sbe::PlanCacheKey& key,
+ const sbe::PlanCacheEntry& entry) -> bool {
+ const auto matchingCollState =
+ [&](const sbe::PlanCacheKeyCollectionState& entryCollState) {
+ return entryCollState.uuid == oldUUID && entryCollState.shardVersion &&
+ entryCollState.shardVersion->epoch == oldShardVersion.epoch() &&
+ entryCollState.shardVersion->ts == oldShardVersion.getTimestamp();
+ };
+
+ // Check whether the main collection of this plan is the one being removed
+ if (matchingCollState(key.getMainCollectionState()))
+ return true;
+
+ // Check whether a secondary collection is the one being removed
+ for (const auto& secCollState : key.getSecondaryCollectionStates()) {
+ if (matchingCollState(secCollState))
return true;
-
- // Check whether a secondary collection is the one being removed
- for (const auto& secCollState : key.getSecondaryCollectionStates()) {
- if (matchingCollState(secCollState))
- return true;
- }
-
- return false;
- });
- } catch (const DBException& ex) {
- LOGV2(6549200,
- "Interrupted deferred clean up of sharded metadata",
- "error"_attr = redact(ex));
- }
- })
- .getAsync([](auto) {});
- }
+ }
+
+ return false;
+ });
+ } catch (const DBException& ex) {
+ LOGV2(6549200,
+ "Interrupted deferred clean up of sharded metadata",
+ "error"_attr = redact(ex));
+ }
+ })
+ .getAsync([](auto) {});
}
void CollectionShardingRuntime::_checkCritSecForIndexMetadata(OperationContext* opCtx) const {