author     Jenny Peshansky <jenny.peshansky@mongodb.com>     2021-05-06 10:53:44 -0400
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2021-05-07 18:31:22 +0000
commit     fabe65934a353bbdd8c13c1efd0f2e064259debb (patch)
tree       e8b9e8d96c85618d5d9e1397654dec80983722ae
parent     bce8de3a989d7b5aeb1c1145099298bd81a5c9bb (diff)
download   mongo-fabe65934a353bbdd8c13c1efd0f2e064259debb.tar.gz
SERVER-55163 Add runtime-configurable setParameter for turning SBE on and off
-rw-r--r--  jstests/libs/sbe_util.js                           9
-rw-r--r--  jstests/noPassthrough/query_knobs_validation.js    4
-rw-r--r--  src/mongo/db/query/canonical_query.cpp             1
-rw-r--r--  src/mongo/db/query/canonical_query.h               7
-rw-r--r--  src/mongo/db/query/get_executor.cpp                3
-rw-r--r--  src/mongo/db/query/plan_cache.cpp                  3
-rw-r--r--  src/mongo/db/query/plan_cache.h                   23
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp           136
-rw-r--r--  src/mongo/db/query/query_knobs.idl                 9
-rw-r--r--  src/mongo/dbtests/query_stage_multiplan.cpp        8
10 files changed, 145 insertions, 58 deletions
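
Because the new knob is declared with set_at: [startup, runtime] (see the query_knobs.idl hunk below), it can be flipped without restarting mongod, or passed at startup via --setParameter internalQueryForceClassicEngine=true. The following is a minimal, illustrative mongo shell sketch (not part of this commit) of toggling and reading the parameter at runtime:

// Sketch only: the parameter name is the one added by this commit; everything else
// here (connection, response handling) is illustrative.
const admin = db.getSiblingDB("admin");

// Force the classic query engine at runtime.
assert.commandWorked(
    admin.runCommand({setParameter: 1, internalQueryForceClassicEngine: true}));

// Read the current value back.
const res = assert.commandWorked(
    admin.runCommand({getParameter: 1, internalQueryForceClassicEngine: 1}));
printjson(res);  // expected to report the current internalQueryForceClassicEngine value

// Allow SBE-eligible queries to use SBE again.
assert.commandWorked(
    admin.runCommand({setParameter: 1, internalQueryForceClassicEngine: false}));
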
diff --git a/jstests/libs/sbe_util.js b/jstests/libs/sbe_util.js
index 78ceb7fa23a..9ebfdfa702b 100644
--- a/jstests/libs/sbe_util.js
+++ b/jstests/libs/sbe_util.js
@@ -32,9 +32,12 @@ function checkSBEEnabled(theDB) {
continue;
}
- const getParam = conn.adminCommand({getParameter: 1, featureFlagSBE: 1});
- checkResult =
- getParam.hasOwnProperty("featureFlagSBE") && getParam.featureFlagSBE.value;
+ const getParam = conn.adminCommand(
+ {getParameter: 1, featureFlagSBE: 1, internalQueryForceClassicEngine: 1});
+ checkResult = getParam.hasOwnProperty("featureFlagSBE") &&
+ getParam.featureFlagSBE.value &&
+ getParam.hasOwnProperty("internalQueryForceClassicEngine") &&
+ !getParam.internalQueryForceClassicEngine.value;
return true;
} catch (e) {
continue;
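
For context, a hedged sketch of how a jstest would typically consume the updated helper; checkSBEEnabled() is the helper modified above, while the load path boilerplate and skip message are illustrative:

(function() {
"use strict";

load("jstests/libs/sbe_util.js");  // defines checkSBEEnabled()

// Skip when SBE is unavailable or the classic engine has been forced via the new knob.
if (!checkSBEEnabled(db)) {
    jsTestLog("Skipping test: SBE is disabled or internalQueryForceClassicEngine is set.");
    return;
}

// ... SBE-specific assertions would go here ...
})();
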
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index f296017a3ac..a1ad955fa6f 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -48,6 +48,7 @@ const expectedParamDefaults = {
internalQueryIgnoreUnknownJSONSchemaKeywords: false,
internalQueryProhibitBlockingMergeOnMongoS: false,
internalQuerySlotBasedExecutionMaxStaticIndexScanIntervals: 1000,
+ internalQueryForceClassicEngine: false,
};
function assertDefaultParameterValues() {
@@ -192,5 +193,8 @@ assertSetParameterSucceeds("internalQuerySlotBasedExecutionMaxStaticIndexScanInt
assertSetParameterFails("internalQuerySlotBasedExecutionMaxStaticIndexScanIntervals", 0);
assertSetParameterFails("internalQuerySlotBasedExecutionMaxStaticIndexScanIntervals", -1);
+assertSetParameterSucceeds("internalQueryForceClassicEngine", true);
+assertSetParameterSucceeds("internalQueryForceClassicEngine", false);
+
MongoRunner.stopMongod(conn);
})();
diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp
index 99771b5f043..0c857b68afd 100644
--- a/src/mongo/db/query/canonical_query.cpp
+++ b/src/mongo/db/query/canonical_query.cpp
@@ -196,6 +196,7 @@ Status CanonicalQuery::init(OperationContext* opCtx,
_findCommand = std::move(findCommand);
_canHaveNoopMatchNodes = canHaveNoopMatchNodes;
+ _forceClassicEngine = internalQueryForceClassicEngine.load();
// Normalize and validate tree.
_root = MatchExpression::normalize(std::move(root));
diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h
index e6d3d8c2171..5b7dcdf191f 100644
--- a/src/mongo/db/query/canonical_query.h
+++ b/src/mongo/db/query/canonical_query.h
@@ -220,6 +220,10 @@ public:
return _explain;
}
+ bool getForceClassicEngine() const {
+ return _forceClassicEngine;
+ }
+
void setExplain(bool explain) {
_explain = explain;
}
@@ -264,6 +268,9 @@ private:
bool _canHaveNoopMatchNodes = false;
bool _explain = false;
+
+ // Determines whether the classic engine must be used.
+ bool _forceClassicEngine = false;
};
} // namespace mongo
diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp
index 282f6bbc047..066507502c3 100644
--- a/src/mongo/db/query/get_executor.cpp
+++ b/src/mongo/db/query/get_executor.cpp
@@ -1201,7 +1201,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
std::unique_ptr<CanonicalQuery> canonicalQuery,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
- return feature_flags::gSBE.isEnabledAndIgnoreFCV() &&
+ return !canonicalQuery->getForceClassicEngine() &&
+ feature_flags::gSBE.isEnabledAndIgnoreFCV() &&
isQuerySbeCompatible(opCtx, canonicalQuery.get(), plannerOptions)
? getSlotBasedExecutor(
opCtx, collection, std::move(canonicalQuery), yieldPolicy, plannerOptions)
diff --git a/src/mongo/db/query/plan_cache.cpp b/src/mongo/db/query/plan_cache.cpp
index b1bdb1f1ef5..4544c82ed02 100644
--- a/src/mongo/db/query/plan_cache.cpp
+++ b/src/mongo/db/query/plan_cache.cpp
@@ -690,7 +690,8 @@ PlanCacheKey PlanCache::computeKey(const CanonicalQuery& cq) const {
StringBuilder indexabilityKeyBuilder;
encodeIndexability(cq.root(), _indexabilityState, &indexabilityKeyBuilder);
- return PlanCacheKey(std::move(shapeString), indexabilityKeyBuilder.str());
+ return PlanCacheKey(
+ std::move(shapeString), indexabilityKeyBuilder.str(), cq.getForceClassicEngine());
}
StatusWith<std::unique_ptr<PlanCacheEntry>> PlanCache::getEntry(const CanonicalQuery& query) const {
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index d913c9bb613..e7db19047ab 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -49,10 +49,13 @@ namespace mongo {
*/
class PlanCacheKey {
public:
- PlanCacheKey(CanonicalQuery::QueryShapeString shapeString, std::string indexabilityString) {
+ PlanCacheKey(CanonicalQuery::QueryShapeString shapeString,
+ std::string indexabilityString,
+ bool forceClassicQueryEngine) {
_lengthOfStablePart = shapeString.size();
_key = std::move(shapeString);
_key += indexabilityString;
+ _key += forceClassicQueryEngine ? "t" : "f";
}
CanonicalQuery::QueryShapeString getStableKey() const {
@@ -64,6 +67,15 @@ public:
}
/**
+ * Return the 'indexability discriminators', that is, the plan cache key component after the
+ * stable key, but before the boolean indicating whether we are using the classic engine.
+ */
+ StringData getIndexabilityDiscriminators() const {
+ return StringData(_key.c_str() + _lengthOfStablePart,
+ _key.size() - _lengthOfStablePart - 1);
+ }
+
+ /**
* Return the "unstable" portion of the key, which may vary across catalog changes.
*/
StringData getUnstablePart() const {
@@ -87,10 +99,11 @@ public:
}
private:
- // Key is broken into two parts:
- // <stable key> | <indexability discriminators>
- // Combined, the two parts make up the plan cache key. We store them in one std::string so that
- // we can easily/cheaply extract the stable key.
+ // Key is broken into three parts:
+ // <stable key> | <indexability discriminators> | <forceClassicQueryEngine boolean>
+ // This third part can be removed once the classic query engine reaches EOL.
+ // Combined, the three parts make up the plan cache key. We store them in one std::string so
+ // that we can easily/cheaply extract the stable key.
std::string _key;
// How long the "stable key" is.
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index d719c911d0e..d45befd0b54 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -50,6 +50,7 @@
#include "mongo/db/query/query_planner_test_lib.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/db/query/query_test_service_context.h"
+#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/scopeguard.h"
@@ -199,14 +200,37 @@ unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
}
/**
- * Check that the stable keys of 'a' and 'b' are equal, but the unstable parts are not.
+ * Check that the stable keys of 'a' and 'b' are equal, but the index discriminators are not.
*/
void assertPlanCacheKeysUnequalDueToDiscriminators(const PlanCacheKey& a, const PlanCacheKey& b) {
ASSERT_EQ(a.getStableKeyStringData(), b.getStableKeyStringData());
- ASSERT_EQ(a.getUnstablePart().size(), b.getUnstablePart().size());
+ ASSERT_EQ(a.getIndexabilityDiscriminators().size(), b.getIndexabilityDiscriminators().size());
+ ASSERT_NE(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
+
// Should always have the begin and end delimiters.
- ASSERT_NE(a.getUnstablePart(), b.getUnstablePart());
- ASSERT_GTE(a.getUnstablePart().size(), 2u);
+ ASSERT_GTE(a.getIndexabilityDiscriminators().size(), 2u);
+}
+
+/**
+ * Check that the stable keys of 'a' and 'b' are equal, but the 'forceClassicEngine' values are not.
+ */
+void assertPlanCacheKeysUnequalDueToForceClassicEngineValue(const PlanCacheKey& a,
+ const PlanCacheKey& b) {
+ ASSERT_EQ(a.getStableKeyStringData(), b.getStableKeyStringData());
+ auto aUnstablePart = a.getUnstablePart();
+ auto bUnstablePart = b.getUnstablePart();
+
+ ASSERT_EQ(aUnstablePart.size(), bUnstablePart.size());
+
+ // Should have at least 1 byte to represent whether we must use the classic engine.
+ ASSERT_GTE(aUnstablePart.size(), 1);
+
+ // The indexability discriminators should match.
+ ASSERT_EQ(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
+
+ // The unstable parts should not match because of the last character.
+ ASSERT_NE(aUnstablePart, bUnstablePart);
+ ASSERT_NE(aUnstablePart[aUnstablePart.size() - 1], bUnstablePart[bUnstablePart.size() - 1]);
}
/**
@@ -1246,7 +1270,7 @@ protected:
};
const std::string mockKey("mock_cache_key");
-const PlanCacheKey CachePlanSelectionTest::ck(mockKey, "");
+const PlanCacheKey CachePlanSelectionTest::ck(mockKey, "", internalQueryForceClassicEngine.load());
//
// Equality
@@ -1901,8 +1925,8 @@ TEST(PlanCacheTest, ComputeKeyCollationIndex) {
// 'noStrings' gets a different key since it is compatible with the index.
assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*containsString),
planCache.computeKey(*noStrings));
- ASSERT_EQ(planCache.computeKey(*containsString).getUnstablePart(), "<0>");
- ASSERT_EQ(planCache.computeKey(*noStrings).getUnstablePart(), "<1>");
+ ASSERT_EQ(planCache.computeKey(*containsString).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(planCache.computeKey(*noStrings).getIndexabilityDiscriminators(), "<1>");
// 'noStrings' and 'containsStringHasCollation' get different keys, since the collation
// specified in the query is considered part of its shape. However, they have the same index
@@ -1929,8 +1953,8 @@ TEST(PlanCacheTest, ComputeKeyCollationIndex) {
// 'inNoStrings' gets a different key since it is compatible with the index.
assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*inContainsString),
planCache.computeKey(*inNoStrings));
- ASSERT_EQ(planCache.computeKey(*inContainsString).getUnstablePart(), "<0>");
- ASSERT_EQ(planCache.computeKey(*inNoStrings).getUnstablePart(), "<1>");
+ ASSERT_EQ(planCache.computeKey(*inContainsString).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(planCache.computeKey(*inNoStrings).getIndexabilityDiscriminators(), "<1>");
// 'inNoStrings' and 'inContainsStringHasCollation' get the same key since they are compatible with
// the index.
@@ -1970,8 +1994,8 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
planCacheWithNoIndexes.computeKey(*usesPathWithObject));
assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*usesPathWithScalar),
planCache.computeKey(*usesPathWithObject));
- ASSERT_EQ(planCache.computeKey(*usesPathWithScalar).getUnstablePart(), "<1>");
- ASSERT_EQ(planCache.computeKey(*usesPathWithObject).getUnstablePart(), "<0>");
+ ASSERT_EQ(planCache.computeKey(*usesPathWithScalar).getIndexabilityDiscriminators(), "<1>");
+ ASSERT_EQ(planCache.computeKey(*usesPathWithObject).getIndexabilityDiscriminators(), "<0>");
ASSERT_EQ(planCache.computeKey(*usesPathWithObject), planCache.computeKey(*usesPathWithArray));
ASSERT_EQ(planCache.computeKey(*usesPathWithObject),
@@ -1999,8 +2023,10 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndex) {
assertPlanCacheKeysUnequalDueToDiscriminators(
planCache.computeKey(*orQueryWithOneBranchAllowed),
planCache.computeKey(*orQueryWithNoBranchesAllowed));
- ASSERT_EQ(planCache.computeKey(*orQueryWithOneBranchAllowed).getUnstablePart(), "<1><0>");
- ASSERT_EQ(planCache.computeKey(*orQueryWithNoBranchesAllowed).getUnstablePart(), "<0><0>");
+ ASSERT_EQ(planCache.computeKey(*orQueryWithOneBranchAllowed).getIndexabilityDiscriminators(),
+ "<1><0>");
+ ASSERT_EQ(planCache.computeKey(*orQueryWithNoBranchesAllowed).getIndexabilityDiscriminators(),
+ "<0><0>");
}
TEST(PlanCacheTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
@@ -2014,16 +2040,16 @@ TEST(PlanCacheTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
std::unique_ptr<CanonicalQuery> equalsNonEmptyObj(canonicalize("{a: {b: 1}}"));
assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*equalsEmptyObj),
planCache.computeKey(*equalsNonEmptyObj));
- ASSERT_EQ(planCache.computeKey(*equalsNonEmptyObj).getUnstablePart(), "<0>");
- ASSERT_EQ(planCache.computeKey(*equalsEmptyObj).getUnstablePart(), "<1>");
+ ASSERT_EQ(planCache.computeKey(*equalsNonEmptyObj).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(planCache.computeKey(*equalsEmptyObj).getIndexabilityDiscriminators(), "<1>");
// $in with empty obj and $in with non-empty obj have different plan cache keys.
std::unique_ptr<CanonicalQuery> inWithEmptyObj(canonicalize("{a: {$in: [{}]}}"));
std::unique_ptr<CanonicalQuery> inWithNonEmptyObj(canonicalize("{a: {$in: [{b: 1}]}}"));
assertPlanCacheKeysUnequalDueToDiscriminators(planCache.computeKey(*inWithEmptyObj),
planCache.computeKey(*inWithNonEmptyObj));
- ASSERT_EQ(planCache.computeKey(*inWithNonEmptyObj).getUnstablePart(), "<0>");
- ASSERT_EQ(planCache.computeKey(*inWithEmptyObj).getUnstablePart(), "<1>");
+ ASSERT_EQ(planCache.computeKey(*inWithNonEmptyObj).getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(planCache.computeKey(*inWithEmptyObj).getIndexabilityDiscriminators(), "<1>");
}
TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilterExpression) {
@@ -2049,8 +2075,8 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
// The discriminator strings have the format "<xx>". That is, there are two discriminator
// bits for the "x" predicate, the first pertaining to the partialFilterExpression and the
// second around applicability to the wildcard index.
- ASSERT_EQ(compatibleKey.getUnstablePart(), "<11>");
- ASSERT_EQ(incompatibleKey.getUnstablePart(), "<01>");
+ ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11>");
+ ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<01>");
}
// The partialFilterExpression should lead to a discriminator over field 'x', but not over 'y'.
@@ -2065,8 +2091,8 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
// The discriminator strings have the format "<xx><y>". That is, there are two discriminator
// bits for the "x" predicate (the first pertaining to the partialFilterExpression, the
// second around applicability to the wildcard index) and one discriminator bit for "y".
- ASSERT_EQ(compatibleKey.getUnstablePart(), "<11><1>");
- ASSERT_EQ(incompatibleKey.getUnstablePart(), "<01><1>");
+ ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11><1>");
+ ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<01><1>");
}
// $eq:null predicates cannot be assigned to a wildcard index. Make sure that this is
@@ -2081,8 +2107,8 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
// The discriminator strings have the format "<xx><y>". That is, there are two discriminator
// bits for the "x" predicate (the first pertaining to the partialFilterExpression, the
// second around applicability to the wildcard index) and one discriminator bit for "y".
- ASSERT_EQ(compatibleKey.getUnstablePart(), "<11><1>");
- ASSERT_EQ(incompatibleKey.getUnstablePart(), "<11><0>");
+ ASSERT_EQ(compatibleKey.getIndexabilityDiscriminators(), "<11><1>");
+ ASSERT_EQ(incompatibleKey.getIndexabilityDiscriminators(), "<11><0>");
}
// Test that the discriminators are correct for an $eq:null predicate on 'x'. This predicate is
@@ -2091,7 +2117,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilter
// result in two "0" bits inside the discriminator string.
{
auto key = planCache.computeKey(*canonicalize("{x: {$eq: null}}"));
- ASSERT_EQ(key.getUnstablePart(), "<00>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<00>");
}
}
@@ -2112,7 +2138,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAnd
// discriminator because it is not referenced in the partial filter expression. All
// predicates are compatible.
auto key = planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<11><11><1>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><11><1>");
}
{
@@ -2120,7 +2146,7 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAnd
// compatible with the partial filter expression, leading to one of the 'y' bits being set
// to zero.
auto key = planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: -2}, z: {$eq: 3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<11><01><1>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<11><01><1>");
}
}
@@ -2139,14 +2165,14 @@ TEST(PlanCacheTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnN
// The discriminators have the format <x><(x.y)(x.y)><y>. All predicates are compatible.
auto key =
planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: 3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<1><11><1>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><11><1>");
}
{
// Here, the predicate on "x.y" is not compatible with the partial filter expression.
auto key =
planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, 'x.y': {$eq: -3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<1><01><1>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><01><1>");
}
}
@@ -2166,21 +2192,21 @@ TEST(PlanCacheTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcard
// the predicate is compatible with the partial filter expression, whereas the discriminator
// for 'y' is about compatibility with the wildcard index.
auto key = planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: 2}, z: {$eq: 3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<1><1>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><1>");
}
{
// Similar to the previous case, except with an 'x' predicate that is incompatible with the
// partial filter expression.
auto key = planCache.computeKey(*canonicalize("{x: {$eq: -1}, y: {$eq: 2}, z: {$eq: 3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<0><1>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<0><1>");
}
{
// Case where the 'y' predicate is not compatible with the wildcard index.
auto key =
planCache.computeKey(*canonicalize("{x: {$eq: 1}, y: {$eq: null}, z: {$eq: 3}}"));
- ASSERT_EQ(key.getUnstablePart(), "<1><0>");
+ ASSERT_EQ(key.getIndexabilityDiscriminators(), "<1><0>");
}
}
@@ -2189,7 +2215,7 @@ TEST(PlanCacheTest, StableKeyDoesNotChangeAcrossIndexCreation) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 0}}"));
const PlanCacheKey preIndexKey = planCache.computeKey(*cq);
const auto preIndexStableKey = preIndexKey.getStableKey();
- ASSERT_EQ(preIndexKey.getUnstablePart(), "");
+ ASSERT_EQ(preIndexKey.getIndexabilityDiscriminators(), "");
const auto keyPattern = BSON("a" << 1);
// Create a sparse index (which requires a discriminator).
@@ -2203,7 +2229,7 @@ TEST(PlanCacheTest, StableKeyDoesNotChangeAcrossIndexCreation) {
const auto postIndexStableKey = postIndexKey.getStableKey();
ASSERT_NE(preIndexKey, postIndexKey);
ASSERT_EQ(preIndexStableKey, postIndexStableKey);
- ASSERT_EQ(postIndexKey.getUnstablePart(), "<1>");
+ ASSERT_EQ(postIndexKey.getIndexabilityDiscriminators(), "<1>");
}
TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
@@ -2213,8 +2239,8 @@ TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
const PlanCacheKey noIndexNeArrayKey = planCache.computeKey(*cqNeArray);
const PlanCacheKey noIndexNeScalarKey = planCache.computeKey(*cqNeScalar);
- ASSERT_EQ(noIndexNeArrayKey.getUnstablePart(), "<0>");
- ASSERT_EQ(noIndexNeScalarKey.getUnstablePart(), "<1>");
+ ASSERT_EQ(noIndexNeArrayKey.getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(noIndexNeScalarKey.getIndexabilityDiscriminators(), "<1>");
ASSERT_EQ(noIndexNeScalarKey.getStableKey(), noIndexNeArrayKey.getStableKey());
const auto keyPattern = BSON("a" << 1);
@@ -2233,11 +2259,11 @@ TEST(PlanCacheTest, ComputeKeyNotEqualsArray) {
ASSERT_EQ(noIndexNeScalarKey.getStableKey(), withIndexNeScalarKey.getStableKey());
// There will be one discriminator for the $not and another for the leaf node ({$eq: 123}).
- ASSERT_EQ(withIndexNeScalarKey.getUnstablePart(), "<1><1>");
+ ASSERT_EQ(withIndexNeScalarKey.getIndexabilityDiscriminators(), "<1><1>");
// There will be one discriminator for the $not and another for the leaf node ({$eq: [1]}).
// Since the index can support equality to an array, the second discriminator will have a value
// of '1'.
- ASSERT_EQ(withIndexNeArrayKey.getUnstablePart(), "<0><1>");
+ ASSERT_EQ(withIndexNeArrayKey.getIndexabilityDiscriminators(), "<0><1>");
}
TEST(PlanCacheTest, ComputeKeyNinArray) {
@@ -2247,8 +2273,8 @@ TEST(PlanCacheTest, ComputeKeyNinArray) {
const PlanCacheKey noIndexNinArrayKey = planCache.computeKey(*cqNinArray);
const PlanCacheKey noIndexNinScalarKey = planCache.computeKey(*cqNinScalar);
- ASSERT_EQ(noIndexNinArrayKey.getUnstablePart(), "<0>");
- ASSERT_EQ(noIndexNinScalarKey.getUnstablePart(), "<1>");
+ ASSERT_EQ(noIndexNinArrayKey.getIndexabilityDiscriminators(), "<0>");
+ ASSERT_EQ(noIndexNinScalarKey.getIndexabilityDiscriminators(), "<1>");
ASSERT_EQ(noIndexNinScalarKey.getStableKey(), noIndexNinArrayKey.getStableKey());
const auto keyPattern = BSON("a" << 1);
@@ -2268,8 +2294,8 @@ TEST(PlanCacheTest, ComputeKeyNinArray) {
ASSERT_NE(noIndexNinArrayKey.getUnstablePart(), withIndexNinArrayKey.getUnstablePart());
ASSERT_EQ(noIndexNinScalarKey.getStableKey(), withIndexNinScalarKey.getStableKey());
- ASSERT_EQ(withIndexNinArrayKey.getUnstablePart(), "<0><1>");
- ASSERT_EQ(withIndexNinScalarKey.getUnstablePart(), "<1><1>");
+ ASSERT_EQ(withIndexNinArrayKey.getIndexabilityDiscriminators(), "<0><1>");
+ ASSERT_EQ(withIndexNinScalarKey.getIndexabilityDiscriminators(), "<1><1>");
}
// Test for a bug which would be easy to introduce. If we only inserted discriminators for some
@@ -2485,4 +2511,32 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
// Verify that size is reset to the original size after removing all entries.
ASSERT_EQ(PlanCacheEntry::planCacheTotalSizeEstimateBytes.get(), originalSize);
}
+
+TEST(PlanCacheTest, DifferentQueryEngines) {
+ // Helper to construct a plan cache key given 'forceClassicEngine'.
+ auto constructPlanCacheKey = [](const PlanCache& pc, bool forceClassicEngine) -> PlanCacheKey {
+ RAIIServerParameterControllerForTest controller{"internalQueryForceClassicEngine",
+ forceClassicEngine};
+ const auto queryStr = "{a: 0}";
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ return pc.computeKey(*cq);
+ };
+
+ PlanCache planCache;
+ const auto keyPattern = BSON("a" << 1);
+
+ // Create a normal btree index. It will have a discriminator.
+ planCache.notifyOfIndexUpdates(
+ {CoreIndexInfo(keyPattern,
+ IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
+ false, // sparse
+ IndexEntry::Identifier{""})}); // name
+
+ const auto classicEngineKey = constructPlanCacheKey(planCache, true);
+ const auto noClassicEngineKey = constructPlanCacheKey(planCache, false);
+
+ // Check that the two plan cache keys are not equal because the plans were created under
+ // different engines.
+ assertPlanCacheKeysUnequalDueToForceClassicEngineValue(classicEngineKey, noClassicEngineKey);
+}
} // namespace
diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl
index 4f606a45fa1..4b9a74ce3b2 100644
--- a/src/mongo/db/query/query_knobs.idl
+++ b/src/mongo/db/query/query_knobs.idl
@@ -301,7 +301,7 @@ server_parameters:
expr: 100 * 1024 * 1024
validator:
gt: 0
-
+
internalDocumentSourceSetWindowFieldsMaxMemoryBytes:
description: "Maximum size of the data that the $setWindowFields aggregation stage will cache in-memory before throwing an error."
set_at: [ startup, runtime ]
@@ -471,3 +471,10 @@ server_parameters:
validator:
gt: 0
lte: { expr: BSONObjMaxInternalSize }
+
+ internalQueryForceClassicEngine:
+ description: "If true, force use of the classic query engine."
+ set_at: [ startup, runtime ]
+ cpp_varname: "internalQueryForceClassicEngine"
+ cpp_vartype: AtomicWord<bool>
+ default: false
diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp
index edd261b21af..bb47733e27e 100644
--- a/src/mongo/dbtests/query_stage_multiplan.cpp
+++ b/src/mongo/dbtests/query_stage_multiplan.cpp
@@ -58,6 +58,7 @@
#include "mongo/db/query/query_planner_test_lib.h"
#include "mongo/db/query/stage_builder_util.h"
#include "mongo/dbtests/dbtests.h"
+#include "mongo/idl/server_parameter_test_util.h"
#include "mongo/util/clock_source_mock.h"
namespace mongo {
@@ -550,12 +551,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) {
//
// This is a regression test for SERVER-20111.
TEST_F(QueryStageMultiPlanTest, MPSSummaryStats) {
- // Bail out and do not run the tests if using the SBE engine.
- // TODO: SERVER-55163 once the feature flag is removed we should use the query configuration
- // knob to force the use of classic engine.
- if (feature_flags::gSBE.isEnabledAndIgnoreFCV()) {
- return;
- }
+ RAIIServerParameterControllerForTest controller("internalQueryForceClassicEngine", true);
const int N = 5000;
for (int i = 0; i < N; ++i) {