summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMihai Andrei <mihai.andrei@10gen.com>2022-05-27 16:45:12 +0000
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2022-05-27 17:59:56 +0000
commitaa40f5e7c0d821915c6225c1eb2001124857d62d (patch)
tree81b088d77ec5b2c598b45badc552ea7813d73b10
parent3e717f0b3ec91d4937363d71b2ee7cc0c20a6221 (diff)
downloadmongo-aa40f5e7c0d821915c6225c1eb2001124857d62d.tar.gz
SERVER-66583 Re-enable SBE as the default execution engine and fall back to classic if no SBE compatible $group or $lookup exists
-rw-r--r--etc/evergreen.yml33
-rw-r--r--etc/generate_subtasks_config.yml4
-rw-r--r--jstests/aggregation/explain_limit.js2
-rw-r--r--jstests/aggregation/expressions/concat_arrays.js2
-rw-r--r--jstests/aggregation/sources/lookup/lookup_collation.js1
-rw-r--r--jstests/aggregation/sources/lookup/profile_lookup.js1
-rw-r--r--jstests/aggregation/sources/project/remove_redundant_projects.js5
-rw-r--r--jstests/aggregation/sources/unionWith/unionWith.js1
-rw-r--r--jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js1
-rw-r--r--jstests/core/cached_plan_trial_does_not_discard_work.js2
-rw-r--r--jstests/core/collation_plan_cache.js3
-rw-r--r--jstests/core/columnstore_eligibility.js3
-rw-r--r--jstests/core/columnstore_index.js3
-rw-r--r--jstests/core/columnstore_index_correctness.js3
-rw-r--r--jstests/core/computed_projections.js2
-rw-r--r--jstests/core/idhack.js3
-rw-r--r--jstests/core/index_filter_catalog_independent.js1
-rw-r--r--jstests/core/index_filter_commands_invalidate_plan_cache_entries.js3
-rw-r--r--jstests/core/introspect_hidden_index_plan_cache_entries.js1
-rw-r--r--jstests/core/plan_cache_list_plans.js2
-rw-r--r--jstests/core/plan_cache_list_shapes.js2
-rw-r--r--jstests/core/plan_cache_sbe.js3
-rw-r--r--jstests/core/plan_cache_shell_helpers.js3
-rw-r--r--jstests/core/query_hash_stability.js2
-rw-r--r--jstests/core/sbe/plan_cache_sbe_with_or_queries.js2
-rw-r--r--jstests/core/sbe/sbe_explain_rejected_plans.js4
-rw-r--r--jstests/core/sbe/sbe_ixscan_explain.js2
-rw-r--r--jstests/core/sbe_plan_cache_autoparameterize_collscan.js2
-rw-r--r--jstests/core/timeseries/timeseries_lookup.js1
-rw-r--r--jstests/core/views/views_collation.js1
-rw-r--r--jstests/core/views/views_validation.js2
-rw-r--r--jstests/core/wildcard_index_cached_plans.js6
-rw-r--r--jstests/core/wildcard_index_filter.js2
-rw-r--r--jstests/libs/sbe_explain_helpers.js6
-rw-r--r--jstests/noPassthrough/external_sort_find.js2
-rw-r--r--jstests/noPassthrough/group_tmp_file_cleanup.js4
-rw-r--r--jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js2
-rw-r--r--jstests/noPassthrough/lookup_pushdown.js42
-rw-r--r--jstests/noPassthrough/plan_cache_index_create.js2
-rw-r--r--jstests/noPassthrough/plan_cache_list_failed_plans.js4
-rw-r--r--jstests/noPassthrough/plan_cache_memory_debug_info.js2
-rw-r--r--jstests/noPassthrough/plan_cache_replan_sort.js2
-rw-r--r--jstests/noPassthrough/plan_cache_stats_agg_source.js2
-rw-r--r--jstests/noPassthrough/query_engine_stats.js9
-rw-r--r--jstests/noPassthrough/query_oplogreplay.js1
-rw-r--r--jstests/noPassthrough/sbe_multiplanner_trial_termination.js9
-rw-r--r--jstests/noPassthrough/sbe_pipeline_plan_cache_key_reporting.js144
-rw-r--r--jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js2
-rw-r--r--jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js2
-rw-r--r--jstests/noPassthrough/sbe_plan_cache_size_metric.js3
-rw-r--r--jstests/noPassthrough/server_status_multiplanner.js8
-rw-r--r--jstests/noPassthroughWithMongod/column_index_skeleton.js12
-rw-r--r--jstests/noPassthroughWithMongod/group_pushdown.js186
-rw-r--r--jstests/noPassthroughWithMongod/index_bounds_static_limit.js2
-rw-r--r--jstests/noPassthroughWithMongod/ne_array_indexability.js2
-rw-r--r--jstests/noPassthroughWithMongod/plan_cache_replanning.js3
-rw-r--r--jstests/noPassthroughWithMongod/sbe_agg_pushdown.js2
-rw-r--r--jstests/sharding/query/collation_lookup.js1
-rw-r--r--src/mongo/db/pipeline/pipeline_d.cpp59
-rw-r--r--src/mongo/db/query/get_executor.cpp37
-rw-r--r--src/mongo/db/query/query_feature_flags.idl8
-rw-r--r--src/mongo/db/query/query_knobs.idl2
-rw-r--r--src/mongo/db/query/query_planner.cpp26
63 files changed, 236 insertions, 458 deletions
diff --git a/etc/evergreen.yml b/etc/evergreen.yml
index 5d0a51174b3..dc62d6e72f7 100644
--- a/etc/evergreen.yml
+++ b/etc/evergreen.yml
@@ -1619,16 +1619,16 @@ buildvariants:
# - rhel80-large
- name: generate_buildid_to_debug_symbols_mapping
-- &enterprise-rhel-80-64-bit-dynamic-sbe-engine
- name: enterprise-rhel-80-64-bit-dynamic-sbe-engine
- display_name: "Shared Library Enterprise RHEL 8.0 (SBE Engine)"
+- &enterprise-rhel-80-64-bit-dynamic-classic-engine
+ name: enterprise-rhel-80-64-bit-dynamic-classic-engine
+ display_name: "Shared Library Enterprise RHEL 8.0 (Classic Engine)"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
stepback: false
modules:
- enterprise
run_on:
- rhel80-small
- expansions: &enterprise-rhel-80-64-bit-dynamic-sbe-engine-expansions
+ expansions: &enterprise-rhel-80-64-bit-dynamic-classic-engine-expansions
additional_package_targets: archive-mongocryptd archive-mongocryptd-debug archive-mh archive-mh-debug
compile_flags: --ssl MONGO_DISTMOD=rhel80 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars --link-model=dynamic
multiversion_platform: rhel80
@@ -1644,7 +1644,7 @@ buildvariants:
burn_in_tag_buildvariants: enterprise-rhel-80-64-bit-inmem linux-64-duroff enterprise-rhel-80-64-bit-multiversion
num_scons_link_jobs_available: 0.99
test_flags: >-
- --mongodSetParameters="{internalQueryForceClassicEngine: false}"
+ --mongodSetParameters="{internalQueryForceClassicEngine: true}"
tasks:
- name: .aggfuzzer
- name: .aggregation
@@ -2338,8 +2338,8 @@ buildvariants:
expansions:
toolchain_version: v4
-- name: rhel80-debug-asan-sbe-engine
- display_name: ~ ASAN Enterprise RHEL 8.0 DEBUG (SBE Engine)
+- name: rhel80-debug-asan-classic-engine
+ display_name: ~ ASAN Enterprise RHEL 8.0 DEBUG (Classic Engine)
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
@@ -2354,7 +2354,7 @@ buildvariants:
san_options: LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1:external_symbolizer_path=/opt/mongodbtoolchain/v3/bin/llvm-symbolizer" ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:external_symbolizer_path=/opt/mongodbtoolchain/v3/bin/llvm-symbolizer"
compile_flags: --variables-files=etc/scons/mongodbtoolchain_v3_clang.vars --dbg=on --opt=on --allocator=system --sanitize=address --ssl --ocsp-stapling=off --enable-free-mon=on -j$(grep -c ^processor /proc/cpuinfo)
test_flags: >-
- --mongodSetParameters="{internalQueryForceClassicEngine: false}"
+ --mongodSetParameters="{internalQueryForceClassicEngine: true}"
--excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling
multiversion_platform: rhel80
multiversion_edition: enterprise
@@ -2493,8 +2493,8 @@ buildvariants:
expansions:
toolchain_version: v4
-- name: rhel80-debug-ubsan-sbe-engine
- display_name: "~ UBSAN Enterprise RHEL 8.0 DEBUG (SBE Engine)"
+- name: rhel80-debug-ubsan-classic-engine
+ display_name: "~ UBSAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
@@ -2509,7 +2509,7 @@ buildvariants:
san_options: UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v3/bin/llvm-symbolizer"
compile_flags: --variables-files=etc/scons/mongodbtoolchain_v3_clang.vars --dbg=on --opt=on --sanitize=undefined --ssl --ocsp-stapling=off --enable-free-mon=on -j$(grep -c ^processor /proc/cpuinfo)
test_flags: >-
- --mongodSetParameters="{internalQueryForceClassicEngine: false}"
+ --mongodSetParameters="{internalQueryForceClassicEngine: true}"
--excludeWithAnyTags=requires_ocsp_stapling
multiversion_platform: rhel80
multiversion_edition: enterprise
@@ -3044,21 +3044,20 @@ buildvariants:
- windows-vsCurrent-large
### QO & QE Patch-Specific Build Variants ###
-- <<: *enterprise-rhel-80-64-bit-dynamic-sbe-engine
- name: enterprise-rhel-80-64-bit-dynamic-sbe-engine-query-patch-only
- display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (SBE Engine)"
+- <<: *enterprise-rhel-80-64-bit-dynamic-classic-engine
+ name: enterprise-rhel-80-64-bit-dynamic-classic-engine-query-patch-only
+ display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (Classic Engine)"
cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history.
expansions:
- <<: *enterprise-rhel-80-64-bit-dynamic-sbe-engine-expansions
+ <<: *enterprise-rhel-80-64-bit-dynamic-classic-engine-expansions
jstestfuzz_num_generated_files: 20
jstestfuzz_concurrent_num_files: 5
target_resmoke_time: 30
max_sub_suites: 3
test_flags: >-
- --mongodSetParameters="{internalQueryForceClassicEngine: false}"
+ --mongodSetParameters="{internalQueryForceClassicEngine: true}"
--excludeWithAnyTags=resource_intensive
-# Intentionally derive from SBE to run the SBE tests with all feature flags.
- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template
name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-query-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (all feature flags)"
diff --git a/etc/generate_subtasks_config.yml b/etc/generate_subtasks_config.yml
index 059623a13ce..11d08c85b02 100644
--- a/etc/generate_subtasks_config.yml
+++ b/etc/generate_subtasks_config.yml
@@ -46,8 +46,8 @@ build_variant_large_distro_exceptions:
- ubuntu1604-debug
- ubuntu1804-debug-asan
- ubuntu1804-debug-asan-all-feature-flags
- - ubuntu1804-debug-asan-sbe-engine
+ - ubuntu1804-debug-asan-classic-engine
- ubuntu1804-debug-aubsan-lite-required
- ubuntu1804-debug-ubsan
- ubuntu1804-debug-ubsan-all-feature-flags
- - ubuntu1804-debug-ubsan-sbe-engine
+ - ubuntu1804-debug-ubsan-classic-engine
diff --git a/jstests/aggregation/explain_limit.js b/jstests/aggregation/explain_limit.js
index 1c701f28ddd..bfa2c7bac38 100644
--- a/jstests/aggregation/explain_limit.js
+++ b/jstests/aggregation/explain_limit.js
@@ -16,7 +16,7 @@ let coll = db.explain_limit;
const kCollSize = 105;
const kLimit = 10;
-const isSBEEnabled = checkSBEEnabled(db);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
// Return whether or not explain() was successful and contained the appropriate fields given the
// requested verbosity. Checks that the number of documents examined and returned are correct given
diff --git a/jstests/aggregation/expressions/concat_arrays.js b/jstests/aggregation/expressions/concat_arrays.js
index 351d36dccf7..5e8aa757e45 100644
--- a/jstests/aggregation/expressions/concat_arrays.js
+++ b/jstests/aggregation/expressions/concat_arrays.js
@@ -130,7 +130,7 @@ runAndAssertThrows(["$dbl_arr", "$dbl_val"]);
// Confirm edge case where if invalid input precedes null or missing inputs, the command fails.
// Note that when the SBE engine is enabled, null will be returned before invalid input because
// we check if any values are null before checking whether all values are arrays.
-let evalFn = checkSBEEnabled(db) ? runAndAssertNull : runAndAssertThrows;
+let evalFn = checkSBEEnabled(db, ["featureFlagSbeFull"]) ? runAndAssertNull : runAndAssertThrows;
evalFn(["$int_arr", "$dbl_val", "$null_val"]);
evalFn(["$int_arr", "some_string_value", "$null_val"]);
evalFn(["$dbl_val", "$null_val"]);
diff --git a/jstests/aggregation/sources/lookup/lookup_collation.js b/jstests/aggregation/sources/lookup/lookup_collation.js
index 4af61a41234..50ec057ae79 100644
--- a/jstests/aggregation/sources/lookup/lookup_collation.js
+++ b/jstests/aggregation/sources/lookup/lookup_collation.js
@@ -13,7 +13,6 @@
* 2. 'collation' option overrides local collection's collation
*/
load("jstests/aggregation/extras/utils.js"); // For anyEq.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/analyze_plan.js"); // For getAggPlanStages, getWinningPlan.
(function() {
diff --git a/jstests/aggregation/sources/lookup/profile_lookup.js b/jstests/aggregation/sources/lookup/profile_lookup.js
index ac25dedc54c..c3eaddfcb6e 100644
--- a/jstests/aggregation/sources/lookup/profile_lookup.js
+++ b/jstests/aggregation/sources/lookup/profile_lookup.js
@@ -5,7 +5,6 @@
(function() {
"use strict";
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/analyze_plan.js"); // For getAggPlanStages.
const localColl = db.local;
diff --git a/jstests/aggregation/sources/project/remove_redundant_projects.js b/jstests/aggregation/sources/project/remove_redundant_projects.js
index 967d147fe85..fe31fade134 100644
--- a/jstests/aggregation/sources/project/remove_redundant_projects.js
+++ b/jstests/aggregation/sources/project/remove_redundant_projects.js
@@ -129,6 +129,11 @@ assertResultsMatch({
expectProjectToCoalesce: true,
pipelineOptimizedAway: true
});
+assertResultsMatch({
+ pipeline: [{$sort: {a: 1}}, {$group: {_id: "$_id", a: {$sum: "$a"}}}, {$project: {arr: 1}}],
+ expectProjectToCoalesce:
+        !groupPushdownEnabled,  // lowering $group into SBE prevents coalescing of projects
+});
// Test that projections with computed fields are removed from the pipeline.
assertResultsMatch({
diff --git a/jstests/aggregation/sources/unionWith/unionWith.js b/jstests/aggregation/sources/unionWith/unionWith.js
index 3f997823ebe..81b2f464337 100644
--- a/jstests/aggregation/sources/unionWith/unionWith.js
+++ b/jstests/aggregation/sources/unionWith/unionWith.js
@@ -7,7 +7,6 @@
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
const testDB = db.getSiblingDB(jsTestName());
diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
index 402000e83dd..e5ed57ed157 100644
--- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
+++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
@@ -1,7 +1,6 @@
'use strict';
load("jstests/libs/fixture_helpers.js"); // For isSharded.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
/**
* view_catalog_cycle_lookup.js
diff --git a/jstests/core/cached_plan_trial_does_not_discard_work.js b/jstests/core/cached_plan_trial_does_not_discard_work.js
index d1d985ea79b..a7a2d2ffd9c 100644
--- a/jstests/core/cached_plan_trial_does_not_discard_work.js
+++ b/jstests/core/cached_plan_trial_does_not_discard_work.js
@@ -19,7 +19,7 @@
load("jstests/libs/profiler.js"); // getLatestProfileEntry.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (!checkSBEEnabled(db)) {
+if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
jsTestLog("Skipping test because SBE is disabled");
return;
}
diff --git a/jstests/core/collation_plan_cache.js b/jstests/core/collation_plan_cache.js
index 9a5857a7dde..8f108a89b9e 100644
--- a/jstests/core/collation_plan_cache.js
+++ b/jstests/core/collation_plan_cache.js
@@ -14,7 +14,8 @@
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromExplain.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSbePlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
var coll = db.collation_plan_cache;
coll.drop();
diff --git a/jstests/core/columnstore_eligibility.js b/jstests/core/columnstore_eligibility.js
index 3c4b4d8f443..6e655276684 100644
--- a/jstests/core/columnstore_eligibility.js
+++ b/jstests/core/columnstore_eligibility.js
@@ -18,7 +18,8 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"]);
+const columnstoreEnabled =
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index validation test since the feature flag is not enabled.");
return;
diff --git a/jstests/core/columnstore_index.js b/jstests/core/columnstore_index.js
index 74a5bda1245..41d4ac18e49 100644
--- a/jstests/core/columnstore_index.js
+++ b/jstests/core/columnstore_index.js
@@ -18,7 +18,8 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"]);
+const columnstoreEnabled =
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index validation test since the feature flag is not enabled.");
return;
diff --git a/jstests/core/columnstore_index_correctness.js b/jstests/core/columnstore_index_correctness.js
index ffd0755a33a..b0115af1398 100644
--- a/jstests/core/columnstore_index_correctness.js
+++ b/jstests/core/columnstore_index_correctness.js
@@ -17,7 +17,8 @@ load("jstests/libs/fail_point_util.js");
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"]);
+const columnstoreEnabled =
+ checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index validation test since the feature flag is not enabled.");
return;
diff --git a/jstests/core/computed_projections.js b/jstests/core/computed_projections.js
index 6a0c73118f2..06ea2476bd9 100644
--- a/jstests/core/computed_projections.js
+++ b/jstests/core/computed_projections.js
@@ -2,7 +2,7 @@
"use strict";
load("jstests/aggregation/extras/utils.js"); // For arrayEq and orderedArrayEq.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
+load("jstests/libs/sbe_util.js"); // For checkSBEEnabledOnSomeNode.
const isSBEEnabled = checkSBEEnabledOnSomeNode(db);
if (isSBEEnabled) {
diff --git a/jstests/core/idhack.js b/jstests/core/idhack.js
index 487490c59ab..ff541a59c0f 100644
--- a/jstests/core/idhack.js
+++ b/jstests/core/idhack.js
@@ -62,7 +62,8 @@ winningPlan = getWinningPlan(explain.queryPlanner);
engineSpecificAssertion(!isIdhack(db, winningPlan), isIxscan(db, winningPlan), db, winningPlan);
// Covered query returning _id field only can be handled by ID hack.
-const isSBEPlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSBEPlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
const parentStage = isSBEPlanCacheEnabled ? "PROJECTION_COVERED" : "FETCH";
explain = t.find(query, {_id: 1}).explain();
winningPlan = getWinningPlan(explain.queryPlanner);
diff --git a/jstests/core/index_filter_catalog_independent.js b/jstests/core/index_filter_catalog_independent.js
index 68e1100a9a9..48889c7e414 100644
--- a/jstests/core/index_filter_catalog_independent.js
+++ b/jstests/core/index_filter_catalog_independent.js
@@ -14,7 +14,6 @@
"use strict";
load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const collName = "index_filter_catalog_independent";
const coll = db[collName];
diff --git a/jstests/core/index_filter_commands_invalidate_plan_cache_entries.js b/jstests/core/index_filter_commands_invalidate_plan_cache_entries.js
index 4e4b1330c21..e4011c61198 100644
--- a/jstests/core/index_filter_commands_invalidate_plan_cache_entries.js
+++ b/jstests/core/index_filter_commands_invalidate_plan_cache_entries.js
@@ -19,7 +19,8 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
// For testing convenience this variable is made an integer "1" if featureFlagSbePlanCache is on,
// because the expected amount of plan cache entries differs between the two different plan caches.
-const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]) ? 1 : 0;
+const isSbePlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]) ? 1 : 0;
const collName = "index_filter_commands_invalidate_plan_cache_entries";
const coll = db[collName];
diff --git a/jstests/core/introspect_hidden_index_plan_cache_entries.js b/jstests/core/introspect_hidden_index_plan_cache_entries.js
index 9f922478529..decc6623ba9 100644
--- a/jstests/core/introspect_hidden_index_plan_cache_entries.js
+++ b/jstests/core/introspect_hidden_index_plan_cache_entries.js
@@ -14,7 +14,6 @@
(function() {
'use strict';
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const collName = 'introspect_hidden_index_plan_cache_entries';
const collNotAffectedName = 'introspect_hidden_index_plan_cache_entries_unaffected';
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index 69ade6c65d6..ca571a9e39e 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -23,7 +23,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let coll = db.jstests_plan_cache_list_plans;
coll.drop();
-const isSBEAndPlanCacheOn = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSBEAndPlanCacheOn = checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
function dumpPlanCacheState() {
return coll.aggregate([{$planCacheStats: {}}]).toArray();
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index 8e6a86b1494..83dd402cbbb 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -16,7 +16,7 @@
'use strict';
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
return;
}
diff --git a/jstests/core/plan_cache_sbe.js b/jstests/core/plan_cache_sbe.js
index dfed346f5ee..6ca74ad42f3 100644
--- a/jstests/core/plan_cache_sbe.js
+++ b/jstests/core/plan_cache_sbe.js
@@ -24,7 +24,8 @@ load("jstests/libs/sbe_explain_helpers.js"); // For engineSpecificAssertion.
const coll = db.plan_cache_sbe;
coll.drop();
-const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSbePlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
assert.commandWorked(coll.insert({a: 1, b: 1}));
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index 3b139313e30..6b596c8efea 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -16,7 +16,8 @@
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEPlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSBEPlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
var coll = db.jstests_plan_cache_shell_helpers;
coll.drop();
diff --git a/jstests/core/query_hash_stability.js b/jstests/core/query_hash_stability.js
index 024ec20703d..129c8a4c701 100644
--- a/jstests/core/query_hash_stability.js
+++ b/jstests/core/query_hash_stability.js
@@ -112,7 +112,7 @@ assertPlanCacheField({
// SBE's planCacheKey encoding encodes "collection version" which will be increased after dropping
// an index.
-if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
// The 'planCacheKey' should be the same as what it was before we dropped the index.
assertPlanCacheField({
firstExplain: initialExplain,
diff --git a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
index 19b66ee05cf..f5e61d19110 100644
--- a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
+++ b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js
@@ -16,7 +16,7 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTest.log("Skip running the test because featureFlagSbePlanCache is not enabled");
return;
diff --git a/jstests/core/sbe/sbe_explain_rejected_plans.js b/jstests/core/sbe/sbe_explain_rejected_plans.js
index acf2101a4f2..76abf9c8a77 100644
--- a/jstests/core/sbe/sbe_explain_rejected_plans.js
+++ b/jstests/core/sbe/sbe_explain_rejected_plans.js
@@ -7,9 +7,9 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/collection_drop_recreate.js");
-load("jstests/libs/sbe_util.js");
+load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
-const isSBEEnabled = checkSBEEnabled(db);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTestLog("Skipping test because the SBE feature flag is disabled");
return;
diff --git a/jstests/core/sbe/sbe_ixscan_explain.js b/jstests/core/sbe/sbe_ixscan_explain.js
index 970e8e9887a..d5177fdf0a6 100644
--- a/jstests/core/sbe/sbe_ixscan_explain.js
+++ b/jstests/core/sbe/sbe_ixscan_explain.js
@@ -12,7 +12,7 @@
load('jstests/libs/analyze_plan.js'); // For getPlanStages
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEEnabled = checkSBEEnabled(db);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTestLog("Skipping test because the SBE feature flag is disabled");
return;
diff --git a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
index 18fdf4305ca..63054ff7d7d 100644
--- a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
+++ b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js
@@ -19,7 +19,7 @@ load("jstests/libs/sbe_util.js");
// This test is specifically verifying the behavior of the SBE plan cache. So if either the SBE plan
// cache or SBE itself are disabled, bail out.
-if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTestLog("Skipping test because either SBE engine or SBE plan cache is disabled");
return;
}
diff --git a/jstests/core/timeseries/timeseries_lookup.js b/jstests/core/timeseries/timeseries_lookup.js
index 75a80a48177..c173764a591 100644
--- a/jstests/core/timeseries/timeseries_lookup.js
+++ b/jstests/core/timeseries/timeseries_lookup.js
@@ -11,7 +11,6 @@
"use strict";
load("jstests/core/timeseries/libs/timeseries.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());
diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js
index f83f253372b..169e9309740 100644
--- a/jstests/core/views/views_collation.js
+++ b/jstests/core/views/views_collation.js
@@ -15,7 +15,6 @@
"use strict";
load("jstests/libs/analyze_plan.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let viewsDB = db.getSiblingDB("views_collation");
assert.commandWorked(viewsDB.dropDatabase());
diff --git a/jstests/core/views/views_validation.js b/jstests/core/views/views_validation.js
index c4a112352c0..02c060fd50a 100644
--- a/jstests/core/views/views_validation.js
+++ b/jstests/core/views/views_validation.js
@@ -7,8 +7,6 @@
(function() {
"use strict";
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-
let viewsDb = db.getSiblingDB("views_validation");
const kMaxViewDepth = 20;
diff --git a/jstests/core/wildcard_index_cached_plans.js b/jstests/core/wildcard_index_cached_plans.js
index 7ca356726c1..9b0b0c33b6d 100644
--- a/jstests/core/wildcard_index_cached_plans.js
+++ b/jstests/core/wildcard_index_cached_plans.js
@@ -29,6 +29,8 @@ coll.drop();
assert.commandWorked(coll.createIndex({"b.$**": 1}));
assert.commandWorked(coll.createIndex({"a": 1}));
+const sbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
+
// In order for the plan cache to be used, there must be more than one plan available. Insert
// data into the collection such that the b.$** index will be far more selective than the index
// on 'a' for the query {a: 1, b: 1}.
@@ -70,7 +72,7 @@ for (let i = 0; i < 2; i++) {
let cacheEntry = getCacheEntryForQuery(query);
assert.neq(cacheEntry, null);
assert.eq(cacheEntry.isActive, true);
-if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (!sbePlanCacheEnabled) {
// Should be at least two plans: one using the {a: 1} index and the other using the b.$** index.
assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans));
@@ -123,7 +125,7 @@ assert.neq(getPlanCacheKeyFromShape({query: queryWithBNull, collection: coll, db
// There should only have been one solution for the above query, so it would get cached only by the
// SBE plan cache.
cacheEntry = getCacheEntryForQuery({a: 1, b: null});
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (sbePlanCacheEnabled) {
assert.neq(cacheEntry, null);
assert.eq(cacheEntry.isActive, true, cacheEntry);
assert.eq(cacheEntry.isPinned, true, cacheEntry);
diff --git a/jstests/core/wildcard_index_filter.js b/jstests/core/wildcard_index_filter.js
index f247fde7f63..e4f06a30fc7 100644
--- a/jstests/core/wildcard_index_filter.js
+++ b/jstests/core/wildcard_index_filter.js
@@ -14,7 +14,7 @@ load("jstests/libs/analyze_plan.js");
load("jstests/libs/fixture_helpers.js"); // For 'isMongos()'.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
return;
}
diff --git a/jstests/libs/sbe_explain_helpers.js b/jstests/libs/sbe_explain_helpers.js
index e25ff0d5544..994394c9abb 100644
--- a/jstests/libs/sbe_explain_helpers.js
+++ b/jstests/libs/sbe_explain_helpers.js
@@ -44,12 +44,14 @@ function getSbePlanStages(queryLayerOutput, stage) {
* Helper to make an assertion depending on the engine being used. If we're in a mixed version
* cluster, then we assert that either 'classicAssert' or 'sbeAssert' is true because the outcome
* will depend on which node we're making assertions against. If we're not in a mixed version
- * scenario, then we make an assertion depending on the value of 'isSBEEnabled'.
+ * scenario, then we make an assertion depending on the return value of 'checkSBEEnabled'.
*/
function engineSpecificAssertion(classicAssert, sbeAssert, theDB, msg) {
if (checkBothEnginesAreRunOnCluster(theDB)) {
assert(classicAssert || sbeAssert, msg);
- } else if (checkSBEEnabled(theDB)) {
+ } else if (checkSBEEnabled(theDB, ["featureFlagSbeFull"])) {
+ // This function assumes that SBE is fully enabled, and will fall back to the classic
+ // assert if it is not.
assert(sbeAssert, msg);
} else {
assert(classicAssert, msg);
diff --git a/jstests/noPassthrough/external_sort_find.js b/jstests/noPassthrough/external_sort_find.js
index a1505f129a3..befac93c5dc 100644
--- a/jstests/noPassthrough/external_sort_find.js
+++ b/jstests/noPassthrough/external_sort_find.js
@@ -21,7 +21,7 @@ assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(o
const testDb = conn.getDB("test");
const collection = testDb.external_sort_find;
-const isSBEEnabled = checkSBEEnabled(testDb);
+const isSBEEnabled = checkSBEEnabled(testDb, ["featureFlagSbeFull"]);
// Construct a document that is just over 1 kB.
const charToRepeat = "-";
diff --git a/jstests/noPassthrough/group_tmp_file_cleanup.js b/jstests/noPassthrough/group_tmp_file_cleanup.js
index 329a3f721e9..42c7b95de88 100644
--- a/jstests/noPassthrough/group_tmp_file_cleanup.js
+++ b/jstests/noPassthrough/group_tmp_file_cleanup.js
@@ -22,7 +22,9 @@ const largeStr = "A".repeat(1024 * 1024); // 1MB string
for (let i = 0; i < memoryLimitMb + 1; ++i)
assert.commandWorked(testDb.largeColl.insert({x: i, largeStr: largeStr + i}));
-const pipeline = [{$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];
+// Inhibit optimization so that $group runs in the classic engine.
+const pipeline =
+ [{$_internalInhibitOptimization: {}}, {$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];
// Make sure that the pipeline needs to spill to disk.
assert.throwsWithCode(() => testDb.largeColl.aggregate(pipeline, {allowDiskUse: false}),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
diff --git a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
index 856e752f627..5f932287b5d 100644
--- a/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
+++ b/jstests/noPassthrough/log_format_slowms_samplerate_loglevel.js
@@ -86,7 +86,7 @@ function runLoggingTests({db, slowMs, logLevel, sampleRate}) {
assert.commandWorked(db.setLogLevel(logLevel, "command"));
assert.commandWorked(db.setLogLevel(logLevel, "write"));
- const isSBEEnabled = checkSBEEnabled(db);
+ const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
// Certain fields in the log lines on mongoD are not applicable in their counterparts on
// mongoS, and vice-versa. Ignore these fields when examining the logs of an instance on
diff --git a/jstests/noPassthrough/lookup_pushdown.js b/jstests/noPassthrough/lookup_pushdown.js
index e5507cc46bb..25b5d65dbfb 100644
--- a/jstests/noPassthrough/lookup_pushdown.js
+++ b/jstests/noPassthrough/lookup_pushdown.js
@@ -294,6 +294,48 @@ function setLookupPushdownDisabled(value) {
{allowDiskUse: true});
}());
+// Verify that SBE is only used when a $lookup or a $group is present.
+(function testLookupGroupIsRequiredForPushdown() {
+ // Don't execute this test case if SBE is fully enabled.
+ if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+ jsTestLog("Skipping test case because we are supporting SBE beyond $group and $lookup" +
+ " pushdown");
+ return;
+ }
+
+ const assertEngineUsed = function(pipeline, isSBE) {
+ const explain = coll.explain().aggregate(pipeline);
+ assert(explain.hasOwnProperty("explainVersion"), explain);
+ if (isSBE) {
+ assert.eq(explain.explainVersion, 2, explain);
+ } else {
+ assert.eq(explain.explainVersion, 1, explain);
+ }
+ };
+
+ const lookup = {$lookup: {from: "coll", localField: "a", foreignField: "b", as: "out"}};
+ const group = {
+ $group: {
+ _id: "$a",
+ out: {$min: "$b"},
+ }
+ };
+ const match = {$match: {a: 1}};
+
+ // $lookup and $group should each run in SBE.
+ assertEngineUsed([lookup], true /* isSBE */);
+ assertEngineUsed([group], true /* isSBE */);
+ assertEngineUsed([lookup, group], true /* isSBE */);
+
+ // $match on its own won't use SBE, nor will an empty pipeline.
+ assertEngineUsed([match], false /* isSBE */);
+ assertEngineUsed([], false /* isSBE */);
+
+ // $match will use SBE if followed by either a $group or a $lookup.
+ assertEngineUsed([match, lookup], true /* isSBE */);
+ assertEngineUsed([match, group], true /* isSBE */);
+})();
+
// Build an index on the foreign collection that matches the foreignField. This should cause us
// to choose an indexed nested loop join.
(function testIndexNestedLoopJoinRegularIndex() {
diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js
index 3ce1731b847..862a46b7c64 100644
--- a/jstests/noPassthrough/plan_cache_index_create.js
+++ b/jstests/noPassthrough/plan_cache_index_create.js
@@ -178,7 +178,7 @@ rst.initiate();
const primaryDB = rst.getPrimary().getDB(dbName);
const secondaryDB = rst.getSecondary().getDB(dbName);
-if (checkSBEEnabled(primaryDB, ["featureFlagSbePlanCache"])) {
+if (checkSBEEnabled(primaryDB, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
rst.stopSet();
return;
diff --git a/jstests/noPassthrough/plan_cache_list_failed_plans.js b/jstests/noPassthrough/plan_cache_list_failed_plans.js
index 696237a3248..853680f1872 100644
--- a/jstests/noPassthrough/plan_cache_list_failed_plans.js
+++ b/jstests/noPassthrough/plan_cache_list_failed_plans.js
@@ -8,9 +8,9 @@ const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
const testDB = conn.getDB("jstests_plan_cache_list_failed_plans");
const coll = testDB.test;
-const isSBEEnabled = checkSBEEnabled(testDB);
+const isSBEEnabled = checkSBEEnabled(testDB, ["featureFlagSbeFull"]);
-if (checkSBEEnabled(testDB, ["featureFlagSbePlanCache"])) {
+if (checkSBEEnabled(testDB, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
MongoRunner.stopMongod(conn);
return;
diff --git a/jstests/noPassthrough/plan_cache_memory_debug_info.js b/jstests/noPassthrough/plan_cache_memory_debug_info.js
index 5f4b9b20678..e7c36ac4fff 100644
--- a/jstests/noPassthrough/plan_cache_memory_debug_info.js
+++ b/jstests/noPassthrough/plan_cache_memory_debug_info.js
@@ -79,7 +79,7 @@ assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("test");
const coll = db.plan_cache_memory_debug_info;
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
MongoRunner.stopMongod(conn);
return;
diff --git a/jstests/noPassthrough/plan_cache_replan_sort.js b/jstests/noPassthrough/plan_cache_replan_sort.js
index 55398c422ea..e905c1a4425 100644
--- a/jstests/noPassthrough/plan_cache_replan_sort.js
+++ b/jstests/noPassthrough/plan_cache_replan_sort.js
@@ -44,7 +44,7 @@ assert.eq(1, cachedPlans.length, cachedPlans);
assert.eq(true, cachedPlans[0].isActive, cachedPlans);
const cachedPlan = getCachedPlan(cachedPlans[0].cachedPlan);
const cachedPlanVersion = cachedPlans[0].version;
-if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
// If the SBE plan cache is on, then the cached plan has a different format.
assert.eq(cachedPlanVersion, "2", cachedPlans);
assert(cachedPlan.stages.includes("sort"), cachedPlans);
diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js
index 250a8a7d307..c2cef0bf538 100644
--- a/jstests/noPassthrough/plan_cache_stats_agg_source.js
+++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js
@@ -16,7 +16,7 @@ const coll = testDb.plan_cache_stats_agg_source;
// Note that the "getParameter" command is expected to fail in versions of mongod that do not yet
// include the slot-based execution engine. When that happens, however, 'isSBEEnabled' still
// correctly evaluates to false.
-const isSBEEnabled = checkSBEEnabled(testDb);
+const isSBEEnabled = checkSBEEnabled(testDb, ["featureFlagSbeFull"]);
function makeMatchForFilteringByShape(query) {
const keyHash = getPlanCacheKeyFromShape({query: query, collection: coll, db: testDb});
diff --git a/jstests/noPassthrough/query_engine_stats.js b/jstests/noPassthrough/query_engine_stats.js
index 492b4792efe..250943806d1 100644
--- a/jstests/noPassthrough/query_engine_stats.js
+++ b/jstests/noPassthrough/query_engine_stats.js
@@ -8,11 +8,20 @@
// For 'getLatestProfilerEntry()'.
load("jstests/libs/profiler.js");
+load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
const conn = MongoRunner.runMongod({});
assert.neq(null, conn, "mongod was unable to start up");
const db = conn.getDB(jsTestName());
+
+// This test assumes that SBE is being used for most queries.
+if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+ jsTestLog("Skipping test because SBE is not fully enabled");
+ MongoRunner.stopMongod(conn);
+ return;
+}
+
assert.commandWorked(db.dropDatabase());
const coll = db.collection;
diff --git a/jstests/noPassthrough/query_oplogreplay.js b/jstests/noPassthrough/query_oplogreplay.js
index f59874e0c29..4fba7c108b7 100644
--- a/jstests/noPassthrough/query_oplogreplay.js
+++ b/jstests/noPassthrough/query_oplogreplay.js
@@ -6,7 +6,6 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/storage_engine_utils.js");
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let replSet = new ReplSetTest({nodes: 1});
replSet.startSet();
diff --git a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
index 75739587a9f..e429ae865ad 100644
--- a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
+++ b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js
@@ -6,6 +6,8 @@
(function() {
"use strict";
+load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
+
const numDocs = 1000;
const dbName = "sbe_multiplanner_db";
const collName = "sbe_multiplanner_coll";
@@ -21,6 +23,13 @@ const trialLengthFromWorksKnob = 0.1 * numDocs;
const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB(dbName);
+
+// This test assumes that SBE is being used for most queries.
+if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+ jsTestLog("Skipping test because SBE is not fully enabled");
+ MongoRunner.stopMongod(conn);
+ return;
+}
const coll = db[collName];
// Gets the "allPlansExecution" section from the explain of a query that has zero results, but for
diff --git a/jstests/noPassthrough/sbe_pipeline_plan_cache_key_reporting.js b/jstests/noPassthrough/sbe_pipeline_plan_cache_key_reporting.js
deleted file mode 100644
index 5c3a0aa5401..00000000000
--- a/jstests/noPassthrough/sbe_pipeline_plan_cache_key_reporting.js
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Confirms that 'planCacheKey' and 'queryHash' are correctly reported when the query has $lookup
- * and $query stages with enabled and disabled SBE Plan Cache.
- */
-
-(function() {
-"use strict";
-
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-
-const databaseName = "pipeline_plan_cache_key_reporting";
-
-function isSBEEnabled() {
- const conn = MongoRunner.runMongod({});
- try {
- const db = conn.getDB(databaseName);
- return checkSBEEnabled(db);
- } finally {
- MongoRunner.stopMongod(conn);
- }
-}
-
-if (!isSBEEnabled()) {
- jsTest.log("Skipping test because SBE is not enabled.");
- return;
-}
-
-/**
- * Driver function that creates mongod instances with specified parameters and run the given test
- * cases.
- * @param {*} params to be passed to mongod in format like { setParameter:
- * "featureFlagSbePlanCache=true"}
- * @param {*} testCases a list of test cases where each test case is an object with 'setup(db)' and
- * 'run(db, assertMessage)' functions.
- * @returns results from 'testCase.run(db, assertMessage)'
- */
-function runTests(params, testCases) {
- let results = [];
- const conn = MongoRunner.runMongod(params);
- const db = conn.getDB(databaseName);
-
- const assertMessage = `${tojson(params)}`;
- try {
- for (let testCase of testCases) {
- testCase.setup(db);
- results.push(testCase.run(db, assertMessage));
- }
- } finally {
- MongoRunner.stopMongod(conn);
- }
- return results;
-}
-
-/**
- * This function validates given explain and return and object with extracted and validated
- * PlanCacheKey and QueryHash.
- * @returns {planCacheKey, queryHash, explain}
- */
-function processAndValidateExplain(explain, assertMessage) {
- assert.neq(explain, null);
- assert.eq(explain.explainVersion,
- "2",
- `[${assertMessage}] invalid explain version ${tojson(explain)}`);
-
- const planCacheKey = explain.queryPlanner.planCacheKey;
- validateKey(planCacheKey, `[${assertMessage}] Invalid planCacheKey: ${tojson(explain)}`);
-
- const queryHash = explain.queryPlanner.queryHash;
- validateKey(queryHash, `[${assertMessage}] Invalid queryHash: ${tojson(explain)}`);
-
- return {planCacheKey, queryHash, explain};
-}
-
-/**
- * Validates given 'key' (PlanCacheKey or QueryHash).
- */
-function validateKey(key, assertMessage) {
- assert.eq(typeof key, "string", assertMessage);
- assert.gt(key.length, 0, assertMessage);
-}
-
-// 1. Create test cases for $lookup and $group stages.
-const lookupTestCase = {
- setup: db => {
- db.coll.drop();
- assert.commandWorked(db.coll.createIndexes([{a: 1}, {a: 1, b: 1}]));
-
- db.lookupColl.drop();
- assert.commandWorked(db.lookupColl.createIndex({b: 1}));
- },
-
- run: (db, assertMessage) => {
- const pipeline = [
- {$lookup: {from: db.lookupColl.getName(), localField: "a", foreignField: "b", as: "w"}}
- ];
- const explain = db.coll.explain().aggregate(pipeline);
- return processAndValidateExplain(explain, assertMessage);
- },
-};
-
-const groupTestCase = {
- setup: db => {
- db.coll.drop();
- assert.commandWorked(db.coll.insertOne({a: 1}));
- },
-
- run: (db, assertMessage) => {
- const pipeline = [{
- $group: {
- _id: "$b",
- }
- }];
- const explain = db.coll.explain().aggregate(pipeline);
- return processAndValidateExplain(explain, assertMessage);
- },
-};
-
-const testCases = [lookupTestCase, groupTestCase];
-
-// 2. Run the test cases with SBE Plan Cache Enabled.
-const sbeParams = {
- setParameter: "featureFlagSbePlanCache=true"
-};
-const sbeKeys = runTests(sbeParams, testCases);
-assert.eq(testCases.length, sbeKeys.length);
-
-// 3. Run the test cases with SBE Plan Cache disabled.
-const classicParams = {
- setParameter: "featureFlagSbePlanCache=false"
-};
-const classicKeys = runTests(classicParams, testCases);
-assert.eq(testCases.length, classicKeys.length);
-
-// 4. Validate that PlanCacheKeys and QueryHash are equal. They should be different once
-// SERVER-61507 is completed.
-for (let i = 0; i < sbeKeys.length; ++i) {
- const sbe = sbeKeys[i];
- const classic = classicKeys[i];
-
- const message = `sbe=${tojson(sbe.explain)}, classic=${tojson(classic.explain)}`;
- assert.eq(sbe.planCacheKey, classic.planCacheKey, message);
- assert.eq(sbe.queryHash, classic.queryHash, message);
-}
-})();
diff --git a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
index 21ff98a1ae1..eecea2a2349 100644
--- a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
+++ b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js
@@ -50,7 +50,7 @@ const db = conn.getDB(dbName);
// This test is specifically verifying the behavior of the SBE plan cache. So if either the SBE plan
// cache or SBE itself are disabled, bail out.
-if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTestLog("Skipping test because either SBE engine or SBE plan cache are disabled");
MongoRunner.stopMongod(conn);
return;
diff --git a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
index df3ac0d3fd9..6a50b4db853 100644
--- a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
+++ b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js
@@ -16,7 +16,7 @@ const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("sbe_plan_cache_memory_debug_info");
-if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE plan cache is not enabled.");
MongoRunner.stopMongod(conn);
return;
diff --git a/jstests/noPassthrough/sbe_plan_cache_size_metric.js b/jstests/noPassthrough/sbe_plan_cache_size_metric.js
index 4cec6ba7f78..ebfe8d5f0bb 100644
--- a/jstests/noPassthrough/sbe_plan_cache_size_metric.js
+++ b/jstests/noPassthrough/sbe_plan_cache_size_metric.js
@@ -44,7 +44,8 @@ function assertQueryInPlanCache(coll, query) {
assert.eq(1, planCacheEntries.length, planCacheEntries);
}
-const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSbePlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
if (isSbePlanCacheEnabled) {
const collectionName = "plan_cache_sbe";
const coll = db[collectionName];
diff --git a/jstests/noPassthrough/server_status_multiplanner.js b/jstests/noPassthrough/server_status_multiplanner.js
index f185cdb7841..42a7062e34c 100644
--- a/jstests/noPassthrough/server_status_multiplanner.js
+++ b/jstests/noPassthrough/server_status_multiplanner.js
@@ -13,6 +13,7 @@ function sumHistogramBucketCounts(histogram) {
}
load("jstests/libs/ftdc.js");
+load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
const collName = jsTestName();
const dbName = jsTestName();
@@ -22,6 +23,13 @@ const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB(dbName);
+// This test assumes that SBE is being used for most queries.
+if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
+ jsTestLog("Skipping test because SBE is not fully enabled");
+ MongoRunner.stopMongod(conn);
+ return;
+}
+
let coll = db.getCollection(collName);
coll.drop();
diff --git a/jstests/noPassthroughWithMongod/column_index_skeleton.js b/jstests/noPassthroughWithMongod/column_index_skeleton.js
index ba06608243e..368b9c7b3bc 100644
--- a/jstests/noPassthroughWithMongod/column_index_skeleton.js
+++ b/jstests/noPassthroughWithMongod/column_index_skeleton.js
@@ -9,19 +9,11 @@ load("jstests/libs/fail_point_util.js");
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEEnabled = checkSBEEnabled(db);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!isSBEEnabled) {
// This test is only relevant when SBE is enabled.
- return;
-}
-
-const getParamResponse =
- assert.commandWorked(db.adminCommand({getParameter: 1, featureFlagColumnstoreIndexes: 1}));
-const columnstoreEnabled = getParamResponse.hasOwnProperty("featureFlagColumnstoreIndexes") &&
- getParamResponse.featureFlagColumnstoreIndexes.value;
-if (!columnstoreEnabled) {
- jsTestLog("Skipping columnstore test since the feature flag is not enabled.");
+ jsTestLog("Skipping columnstore test since either SBE or columnstore are disabled.");
return;
}
diff --git a/jstests/noPassthroughWithMongod/group_pushdown.js b/jstests/noPassthroughWithMongod/group_pushdown.js
index 43476441ef3..e541c2e2f75 100644
--- a/jstests/noPassthroughWithMongod/group_pushdown.js
+++ b/jstests/noPassthroughWithMongod/group_pushdown.js
@@ -46,25 +46,6 @@ let assertGroupPushdown = function(
assert.sameMembers(results, expectedResults);
};
-let assertProjectPushdown = function(
- {coll, pipeline, expectProjectToBePushedDown, options = {}} = {}) {
- const explain = coll.explain().aggregate(pipeline, options);
-
- let result;
- if (expectProjectToBePushedDown) {
- result = getWinningPlan(explain.queryPlanner);
- } else {
- result = getWinningPlan(explain.stages[0].$cursor.queryPlanner);
- }
-
- // Check that $project uses the query system.
- assert.eq(expectProjectToBePushedDown,
- planHasStage(db, result, "PROJECTION_DEFAULT") ||
- planHasStage(db, result, "PROJECTION_COVERED") ||
- planHasStage(db, result, "PROJECTION_SIMPLE"),
- explain);
-};
-
let assertNoGroupPushdown = function(coll, pipeline, expectedResults, options = {}) {
const explain = coll.explain().aggregate(pipeline, options);
assert.eq(null, getAggPlanStage(explain, "GROUP"), explain);
@@ -73,7 +54,7 @@ let assertNoGroupPushdown = function(coll, pipeline, expectedResults, options =
assert.sameMembers(resultNoGroupPushdown, expectedResults);
};
-let assertResultsMatchWithAndWithoutGroupPushdown = function(
+let assertResultsMatchWithAndWithoutPushdown = function(
coll, pipeline, expectedResults, expectedGroupCountInExplain) {
// Make sure the provided pipeline is eligible for pushdown.
assertGroupPushdown(coll, pipeline, expectedResults, expectedGroupCountInExplain);
@@ -93,27 +74,6 @@ let assertResultsMatchWithAndWithoutGroupPushdown = function(
assert.sameMembers(resultNoGroupPushdown, resultWithGroupPushdown);
};
-let assertResultsMatchWithAndWithoutProjectPushdown = function(
- {coll, pipeline, expectProjectToBePushedDown, expectedResults, options = {}} = {}) {
- // Make sure the provided pipeline is eligible for project pushdown.
- assertProjectPushdown(
- {coll: coll, pipeline: pipeline, expectProjectToBePushedDown: expectProjectToBePushedDown});
-
- // Turn sbe off.
- db.adminCommand({setParameter: 1, internalQueryForceClassicEngine: true});
-
- // Sanity check the results when no project pushdown happens.
- let resultNoProjectPushdown = coll.aggregate(pipeline).toArray();
- assert.sameMembers(resultNoProjectPushdown, expectedResults);
-
- // Turn sbe on which will allow $group stages that contain supported accumulators to be pushed
- // down under certain conditions.
- db.adminCommand({setParameter: 1, internalQueryForceClassicEngine: false});
-
- let resultWithProjectPushdown = coll.aggregate(pipeline).toArray();
- assert.sameMembers(resultNoProjectPushdown, resultWithProjectPushdown);
-};
-
let assertShardedGroupResultsMatch = function(coll, pipeline, expectedGroupCountInExplain = 1) {
const originalSBEEngineStatus =
assert
@@ -156,41 +116,38 @@ assert.eq(
[{"_id": 5, "item": "c", "price": 5, "quantity": 10, "date": ISODate("2014-02-15T09:05:00Z")}]);
// Run a simple $group with {$sum: 1} accumulator, and check if it gets pushed down.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", c: {$sum: NumberInt(1)}}}],
[{_id: "a", c: NumberInt(2)}, {_id: "b", c: NumberInt(2)}, {_id: "c", c: NumberInt(1)}],
1);
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", c: {$sum: NumberLong(1)}}}],
[{_id: "a", c: NumberLong(2)}, {_id: "b", c: NumberLong(2)}, {_id: "c", c: NumberLong(1)}],
1);
-assertResultsMatchWithAndWithoutGroupPushdown(
- coll,
- [{$group: {_id: "$item", c: {$sum: 1}}}],
- [{_id: "a", c: 2}, {_id: "b", c: 2}, {_id: "c", c: 1}],
- 1);
+assertResultsMatchWithAndWithoutPushdown(coll,
+ [{$group: {_id: "$item", c: {$sum: 1}}}],
+ [{_id: "a", c: 2}, {_id: "b", c: 2}, {_id: "c", c: 1}],
+ 1);
// Run a simple $group with supported $sum accumulator, and check if it gets pushed down.
-assertResultsMatchWithAndWithoutGroupPushdown(
- coll,
- [{$group: {_id: "$item", s: {$sum: "$price"}}}],
- [{_id: "a", s: 15}, {_id: "b", s: 30}, {_id: "c", s: 5}],
- 1);
+assertResultsMatchWithAndWithoutPushdown(coll,
+ [{$group: {_id: "$item", s: {$sum: "$price"}}}],
+ [{_id: "a", s: 15}, {_id: "b", s: 30}, {_id: "c", s: 5}],
+ 1);
// The subexpression '$not' is not translated to $coerceToolBool and thus is SBE compatible.
-assertResultsMatchWithAndWithoutGroupPushdown(
- coll,
- [{$group: {_id: "$item", c: {$sum: {$not: "$price"}}}}],
- [{_id: "a", c: 0}, {_id: "b", c: 0}, {_id: "c", c: 0}],
- 1);
+assertResultsMatchWithAndWithoutPushdown(coll,
+ [{$group: {_id: "$item", c: {$sum: {$not: "$price"}}}}],
+ [{_id: "a", c: 0}, {_id: "b", c: 0}, {_id: "c", c: 0}],
+ 1);
// Two group stages both get pushed down and the second $group stage refer to only a top-level field
// which does not exist.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", s: {$sum: "$price"}}}, {$group: {_id: "$quantity", c: {$count: {}}}}],
[{_id: null, c: 3}],
@@ -198,7 +155,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
// Two group stages both get pushed down and the second $group stage refers to only existing
// top-level fields of the first $group.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$group: {_id: "$item", qsum: {$sum: "$quantity"}, msum: {$sum: "$price"}}},
@@ -208,14 +165,14 @@ assertResultsMatchWithAndWithoutGroupPushdown(
2);
// The $group stage refers to the same top-level field twice.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", ps1: {$sum: "$price"}, ps2: {$sum: "$price"}}}],
[{_id: "a", ps1: 15, ps2: 15}, {_id: "b", ps1: 30, ps2: 30}, {_id: "c", ps1: 5, ps2: 5}],
1);
// The $group stage refers to the same top-level field twice and another top-level field.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{
$group:
@@ -229,7 +186,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
1);
// The $group stage refers to two existing sub-fields.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$project: {item: 1, price: 1, quantity: 1, dateParts: {$dateToParts: {date: "$date"}}}},
@@ -244,7 +201,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
1);
// The $group stage refers to a non-existing sub-field twice.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", hs: {$sum: {$add: ["$date.hour", "$date.hour"]}}}}],
[{"_id": "a", "hs": 0}, {"_id": "b", "hs": 0}, {"_id": "c", "hs": 0}],
@@ -287,12 +244,12 @@ assertResultsMatchWithAndWithoutGroupPushdown(
{$group: {_id: "$_id", ss: {$sum: {$add: ["$indexKeyPattern", "$indexKeyPattern"]}}}}
],
].forEach(pipeline =>
- assertResultsMatchWithAndWithoutGroupPushdown(
+ assertResultsMatchWithAndWithoutPushdown(
coll, pipeline, [{_id: "a", ss: 30}, {_id: "b", ss: 60}, {_id: "c", ss: 10}], 2));
// The second $group stage refers to both a top-level field and a sub-field twice which does not
// exist.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$group: {_id: "$item", ps: {$sum: "$price"}}},
@@ -306,7 +263,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
2);
// The second $group stage refers to a sub-field which does exist.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$group: {_id: {i: "$item", p: {$divide: ["$price", 5]}}}},
@@ -316,7 +273,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
2);
// Verifies that an optimized expression can be pushed down.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
// {"$ifNull": [1, 2]} will be optimized into just the constant 1.
[{$group: {_id: {"$ifNull": [1, 2]}, o: {$min: "$quantity"}}}],
@@ -338,75 +295,6 @@ assertGroupPushdown(coll,
[{$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}}],
[{_id: {i: "a"}, s: 15}, {_id: {i: "b"}, s: 30}, {_id: {i: "c"}, s: 5}]);
-assertResultsMatchWithAndWithoutProjectPushdown({
- coll: coll,
- pipeline: [{$project: {x: "$item"}}],
- expectProjectToBePushedDown: true,
- expectedResults: [
- {"_id": 5, "x": "c"},
- {"_id": 4, "x": "b"},
- {"_id": 3, "x": "a"},
- {"_id": 2, "x": "b"},
- {"_id": 1, "x": "a"}
- ]
-});
-
-assertResultsMatchWithAndWithoutProjectPushdown({
- coll: coll,
- pipeline: [{$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}}, {$project: {x: "$s"}}],
- expectProjectToBePushedDown: true,
- expectedResults:
- [{"_id": {"i": "b"}, "x": 30}, {"_id": {"i": "a"}, "x": 15}, {"_id": {"i": "c"}, "x": 5}]
-});
-
-assertResultsMatchWithAndWithoutProjectPushdown({
- coll: coll,
- pipeline: [
- {$group: {_id: "$item", s: {$sum: "$price"}}},
- {$project: {_id: 1, x: "$s"}},
- {$group: {_id: "$_id", total: {$sum: "$x"}}}
- ],
- expectProjectToBePushedDown: true,
- expectedResults:
- [{"_id": "a", "total": 15}, {"_id": "c", "total": 5}, {"_id": "b", "total": 30}]
-});
-
-assertResultsMatchWithAndWithoutProjectPushdown({
- coll: coll,
- pipeline: [
- {$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}},
- {$addFields: {x: 1}},
- {$project: {s: 0}}
- ],
- expectProjectToBePushedDown: false,
- expectedResults:
- [{"_id": {"i": "c"}, "x": 1}, {"_id": {"i": "b"}, "x": 1}, {"_id": {"i": "a"}, "x": 1}]
-});
-
-assertResultsMatchWithAndWithoutProjectPushdown({
- coll: coll,
- pipeline: [
- {$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}},
- {$addFields: {x: 1}},
- {$project: {s: 1}}
- ],
- expectProjectToBePushedDown: false,
- expectedResults:
- [{"_id": {"i": "c"}, "s": 5}, {"_id": {"i": "b"}, "s": 30}, {"_id": {"i": "a"}, "s": 15}]
-});
-
-assertResultsMatchWithAndWithoutProjectPushdown({
- coll: coll,
- pipeline: [
- {$match: {item: "a"}},
- {$sort: {price: 1}},
- {$group: {_id: "$item"}},
- {$project: {x: "$item"}}
- ],
- expectProjectToBePushedDown: true,
- expectedResults: [{"_id": "a"}]
-});
-
// Run a group with spilling on and check that $group is pushed down.
assertGroupPushdown(coll,
[{$group: {_id: "$item", s: {$sum: "$price"}}}],
@@ -507,19 +395,17 @@ assert.commandWorked(coll.insert(docs));
const verifyGroupPushdownWhenSubplanning = () => {
const matchWithOr = {$match: {$or: [{"item": "a"}, {"price": 10}]}};
const groupPushedDown = {$group: {_id: "$item", quantity: {$sum: "$quantity"}}};
- assertResultsMatchWithAndWithoutGroupPushdown(
- coll,
- [matchWithOr, groupPushedDown],
- [{_id: "a", quantity: 7}, {_id: "b", quantity: 10}],
- 1);
+ assertResultsMatchWithAndWithoutPushdown(coll,
+ [matchWithOr, groupPushedDown],
+ [{_id: "a", quantity: 7}, {_id: "b", quantity: 10}],
+ 1);
// A trival $and with only one $or will be optimized away and thus $or will be the top
// expression.
const matchWithTrivialAndOr = {$match: {$and: [{$or: [{"item": "a"}, {"price": 10}]}]}};
- assertResultsMatchWithAndWithoutGroupPushdown(
- coll,
- [matchWithTrivialAndOr, groupPushedDown],
- [{_id: "a", quantity: 7}, {_id: "b", quantity: 10}],
- 1);
+ assertResultsMatchWithAndWithoutPushdown(coll,
+ [matchWithTrivialAndOr, groupPushedDown],
+ [{_id: "a", quantity: 7}, {_id: "b", quantity: 10}],
+ 1);
};
// Verify that $group can be pushed down when subplanning is involved. With this test case,
@@ -551,7 +437,7 @@ assertNoGroupPushdown(
]);
// Verify that $bucket is pushed down to SBE and returns correct results.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{
$bucket:
@@ -559,7 +445,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
}],
[{"_id": 1, "quantity": 15}, {"_id": 10, "quantity": 13}]);
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{
$bucket: {
@@ -571,7 +457,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
[{"_id": 1, "count": 5, "quantity": 28}]);
// Verify that $sortByCount is pushed down to SBE and returns correct results.
-assertResultsMatchWithAndWithoutGroupPushdown(
+assertResultsMatchWithAndWithoutPushdown(
coll,
[{$sortByCount: "$item"}],
[{"_id": "a", "count": 2}, {"_id": "b", "count": 2}, {"_id": "c", "count": 1}]);
diff --git a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
index 3918976b51f..ea0af1d1537 100644
--- a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
+++ b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js
@@ -9,7 +9,7 @@
load("jstests/libs/analyze_plan.js"); // For explain helpers.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEEnabled = checkSBEEnabled(db);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
// This test is only relevant when SBE is enabled.
diff --git a/jstests/noPassthroughWithMongod/ne_array_indexability.js b/jstests/noPassthroughWithMongod/ne_array_indexability.js
index b3556152939..8c96060dbf6 100644
--- a/jstests/noPassthroughWithMongod/ne_array_indexability.js
+++ b/jstests/noPassthroughWithMongod/ne_array_indexability.js
@@ -31,7 +31,7 @@ function runTest(queryToCache, queryToRunAfterCaching) {
// a different planCacheKey. The SBE plan cache, on the other hand, does not auto-parameterize
// $in or $eq involving a constant of type array, and therefore will consider the two queries to
// have different shapes.
- if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
+ if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
assert.neq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
} else {
assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
index 12bb1a1176c..5a45226ad1f 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
@@ -10,7 +10,8 @@ load('jstests/libs/analyze_plan.js'); // For getPlanStage().
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
+const isSbePlanCacheEnabled =
+ checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
let coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
diff --git a/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js b/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js
index c3f8d9259b2..6e9c341e424 100644
--- a/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js
+++ b/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js
@@ -7,7 +7,7 @@
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
-const isSBEEnabled = checkSBEEnabled(db);
+const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTestLog("Skipping test because the SBE feature flag is disabled");
return;
diff --git a/jstests/sharding/query/collation_lookup.js b/jstests/sharding/query/collation_lookup.js
index 57f4e8eeeed..8c479d8e12a 100644
--- a/jstests/sharding/query/collation_lookup.js
+++ b/jstests/sharding/query/collation_lookup.js
@@ -11,7 +11,6 @@
load("jstests/aggregation/extras/utils.js"); // for arrayEq
load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
function runTests(withDefaultCollationColl, withoutDefaultCollationColl, collation) {
// Test that the $lookup stage respects the inherited collation.
diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp
index fbba650d37d..d8f0c0b66a3 100644
--- a/src/mongo/db/pipeline/pipeline_d.cpp
+++ b/src/mongo/db/pipeline/pipeline_d.cpp
@@ -66,7 +66,6 @@
#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h"
#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/pipeline/document_source_match.h"
-#include "mongo/db/pipeline/document_source_project.h"
#include "mongo/db/pipeline/document_source_sample.h"
#include "mongo/db/pipeline/document_source_sample_from_random_cursor.h"
#include "mongo/db/pipeline/document_source_single_document_transformation.h"
@@ -110,24 +109,23 @@ namespace {
* pipeline to prepare for pushdown of $group and $lookup into the inner query layer so that it
* can be executed using SBE.
* Group stages are extracted from the pipeline when all of the following conditions are met:
- * 0. When the 'internalQueryForceClassicEngine' feature flag is 'false'.
- * 1. When 'allowDiskUse' is false. We currently don't support spilling in the SBE HashAgg
- * stage. This will change once that is supported when SERVER-58436 is complete.
- * 2. When the DocumentSourceGroup has 'doingMerge=false', this will change when we implement
- * hash table spilling in SERVER-58436.
+ * - When the 'internalQueryForceClassicEngine' feature flag is 'false'.
+ * - When the 'internalQuerySlotBasedExecutionDisableGroupPushdown' query knob is 'false'.
+ * - When the 'featureFlagSBEGroupPushdown' feature flag is 'true'.
+ * - When the DocumentSourceGroup has 'doingMerge=false'.
*
* Lookup stages are extracted from the pipeline when all of the following conditions are met:
- * 0. When the 'internalQueryForceClassicEngine' feature flag is 'false'.
- * 1. When the 'featureFlagSBELookupPushdown' feature flag is 'true'.
- * 2. The $lookup uses only the 'localField'/'foreignField' syntax (no pipelines).
- * 3. The foreign collection is neither sharded nor a view.
+ * - When the 'internalQueryForceClassicEngine' feature flag is 'false'.
+ * - When the 'internalQuerySlotBasedExecutionDisableLookupPushdown' query knob is 'false'.
+ * - When the 'featureFlagSBELookupPushdown' feature flag is 'true'.
+ * - The $lookup uses only the 'localField'/'foreignField' syntax (no pipelines).
+ * - The foreign collection is neither sharded nor a view.
*/
std::vector<std::unique_ptr<InnerPipelineStageInterface>> extractSbeCompatibleStagesForPushdown(
const intrusive_ptr<ExpressionContext>& expCtx,
const MultipleCollectionAccessor& collections,
const CanonicalQuery* cq,
- Pipeline* pipeline,
- const bool origSbeCompatible) {
+ Pipeline* pipeline) {
// We will eventually use the extracted group stages to populate 'CanonicalQuery::pipeline'
// which requires stages to be wrapped in an interface.
std::vector<std::unique_ptr<InnerPipelineStageInterface>> stagesForPushdown;
@@ -185,23 +183,6 @@ std::vector<std::unique_ptr<InnerPipelineStageInterface>> extractSbeCompatibleSt
break;
}
- // $project pushdown logic.
- if (auto projectStage =
- dynamic_cast<DocumentSourceSingleDocumentTransformation*>(itr->get())) {
- bool projectEligibleForPushdown = feature_flags::gFeatureFlagSBEGroupPushdown.isEnabled(
- serverGlobalParams.featureCompatibility) &&
- origSbeCompatible &&
- (projectStage->getType() ==
- TransformerInterface::TransformerType::kInclusionProjection);
-
- if (projectEligibleForPushdown) {
- stagesForPushdown.push_back(std::make_unique<InnerPipelineStageImpl>(projectStage));
- sources.erase(itr++);
- continue;
- }
- break;
- }
-
// $lookup pushdown logic.
if (auto lookupStage = dynamic_cast<DocumentSourceLookUp*>(itr->get())) {
if (disallowLookupPushdown) {
@@ -268,7 +249,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
// Reset the 'sbeCompatible' flag before canonicalizing the 'findCommand' to potentially allow
// SBE to execute the portion of the query that's pushed down, even if the portion of the query
// that is not pushed down contains expressions not supported by SBE.
- bool origSbeCompatible = expCtx->sbeCompatible;
expCtx->sbeCompatible = true;
auto cq = CanonicalQuery::canonicalize(expCtx->opCtx,
@@ -321,16 +301,15 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
}
auto permitYield = true;
- return getExecutorFind(
- expCtx->opCtx,
- collections,
- std::move(cq.getValue()),
- [&, origSbeCompatible](auto* canonicalQuery) {
- canonicalQuery->setPipeline(extractSbeCompatibleStagesForPushdown(
- expCtx, collections, canonicalQuery, pipeline, origSbeCompatible));
- },
- permitYield,
- plannerOpts);
+ return getExecutorFind(expCtx->opCtx,
+ collections,
+ std::move(cq.getValue()),
+ [&](auto* canonicalQuery) {
+ canonicalQuery->setPipeline(extractSbeCompatibleStagesForPushdown(
+ expCtx, collections, canonicalQuery, pipeline));
+ },
+ permitYield,
+ plannerOpts);
}
/**
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index f1a6be4d958..6f6db41882c 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1295,13 +1295,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
OperationContext* opCtx,
const MultipleCollectionAccessor& collections,
std::unique_ptr<CanonicalQuery> cq,
- std::function<void(CanonicalQuery*)> extractAndAttachPipelineStages,
PlanYieldPolicy::YieldPolicy requestedYieldPolicy,
size_t plannerOptions) {
- invariant(cq);
- if (extractAndAttachPipelineStages) {
- extractAndAttachPipelineStages(cq.get());
- }
// Mark that this query uses the SBE engine, unless this has already been set.
OpDebug& opDebug = CurOp::get(opCtx)->debug();
if (!opDebug.classicEngineUsed) {
@@ -1383,18 +1378,32 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
std::function<void(CanonicalQuery*)> extractAndAttachPipelineStages,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
+ invariant(canonicalQuery);
const auto& mainColl = collections.getMainCollection();
canonicalQuery->setSbeCompatible(
sbe::isQuerySbeCompatible(&mainColl, canonicalQuery.get(), plannerOptions));
- return !canonicalQuery->getForceClassicEngine() && canonicalQuery->isSbeCompatible()
- ? getSlotBasedExecutor(opCtx,
- collections,
- std::move(canonicalQuery),
- extractAndAttachPipelineStages,
- yieldPolicy,
- plannerOptions)
- : getClassicExecutor(
- opCtx, mainColl, std::move(canonicalQuery), yieldPolicy, plannerOptions);
+
+ // Use SBE if 'canonicalQuery' is SBE compatible.
+ if (!canonicalQuery->getForceClassicEngine() && canonicalQuery->isSbeCompatible()) {
+ if (extractAndAttachPipelineStages) {
+ extractAndAttachPipelineStages(canonicalQuery.get());
+ }
+
+ // TODO SERVER-65960: Optionally refactor this logic once we have a mechanism to reattach
+ // pipeline stages.
+ // Use SBE if we find any $group/$lookup stages eligible for execution in SBE or if SBE
+ // is fully enabled. Otherwise, fallback to the classic engine.
+ if (canonicalQuery->pipeline().empty() &&
+ !feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
+ canonicalQuery->setSbeCompatible(false);
+ } else {
+ return getSlotBasedExecutor(
+ opCtx, collections, std::move(canonicalQuery), yieldPolicy, plannerOptions);
+ }
+ }
+
+ return getClassicExecutor(
+ opCtx, mainColl, std::move(canonicalQuery), yieldPolicy, plannerOptions);
}
StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
diff --git a/src/mongo/db/query/query_feature_flags.idl b/src/mongo/db/query/query_feature_flags.idl
index f9b7ddb8ec8..7b48c713309 100644
--- a/src/mongo/db/query/query_feature_flags.idl
+++ b/src/mongo/db/query/query_feature_flags.idl
@@ -81,8 +81,7 @@ feature_flags:
featureFlagSbePlanCache:
description: "Feature flag for enabling use of the SBE plan cache"
cpp_varname: gFeatureFlagSbePlanCache
- default: true
- version: 6.0
+ default: false
featureFlagSortArray:
description: "Feature flag for allowing use of the $sortArray aggregation expression"
@@ -150,4 +149,9 @@ feature_flags:
featureFlagServerlessChangeStreams:
description: "Feature flag to enable reading change events from the change collection rather than the oplog"
cpp_varname: gFeatureFlagServerlessChangeStreams
+ default: false
+
+ featureFlagSbeFull:
+ description: "Feature flag to enable using SBE for a larger number of queries"
+ cpp_varname: gFeatureFlagSbeFull
default: false \ No newline at end of file
diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl
index 2e8f4d7358c..18851f0ddb9 100644
--- a/src/mongo/db/query/query_knobs.idl
+++ b/src/mongo/db/query/query_knobs.idl
@@ -659,7 +659,7 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalQueryForceClassicEngine"
cpp_vartype: AtomicWord<bool>
- default: true
+ default: false
internalQueryAppendIdToSetWindowFieldsSort:
description: "If true, appends _id to the sort stage generated by desugaring $setWindowFields to
diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp
index fa810499cb4..f7a36dd682a 100644
--- a/src/mongo/db/query/query_planner.cpp
+++ b/src/mongo/db/query/query_planner.cpp
@@ -46,7 +46,6 @@
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/pipeline/document_source_group.h"
#include "mongo/db/pipeline/document_source_lookup.h"
-#include "mongo/db/pipeline/document_source_single_document_transformation.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/collation/collation_index_key.h"
@@ -56,8 +55,6 @@
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/planner_ixselect.h"
-#include "mongo/db/query/projection_parser.h"
-#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/logv2/log.h"
@@ -1389,26 +1386,6 @@ std::unique_ptr<QuerySolution> QueryPlanner::extendWithAggPipeline(
continue;
}
- auto projStage =
- dynamic_cast<DocumentSourceSingleDocumentTransformation*>(innerStage->documentSource());
- if (projStage) {
- auto projObj =
- projStage->getTransformer().serializeTransformation(boost::none).toBson();
- auto projAst =
- projection_ast::parseAndAnalyze(projStage->getContext(),
- projObj,
- ProjectionPolicies::aggregateProjectionPolicies());
-
- if (projAst.isSimple()) {
- solnForAgg = std::make_unique<ProjectionNodeSimple>(
- std::move(solnForAgg), *query.root(), projAst);
- } else {
- solnForAgg = std::make_unique<ProjectionNodeDefault>(
- std::move(solnForAgg), *query.root(), projAst);
- }
- continue;
- }
-
auto lookupStage = dynamic_cast<DocumentSourceLookUp*>(innerStage->documentSource());
if (lookupStage) {
tassert(6369000,
@@ -1430,8 +1407,7 @@ std::unique_ptr<QuerySolution> QueryPlanner::extendWithAggPipeline(
}
tasserted(5842400,
- "Cannot support pushdown of a stage other than $group $project or $lookup at the "
- "moment");
+ "Cannot support pushdown of a stage other than $group or $lookup at the moment");
}
solution->extendWith(std::move(solnForAgg));