author     Anton Korshunov <anton.korshunov@mongodb.com>  2023-02-13 11:53:22 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>  2023-02-14 15:59:30 +0000
commit     a74810475928c34efbbb5beda556f9a902312e2c (patch)
tree       e682d6ba191011c005955031caf617fa08ec143c
parent     37f6d07553bd5215866b22f6ec418b16b27e52be (diff)
download   mongo-a74810475928c34efbbb5beda556f9a902312e2c.tar.gz
SERVER-71159 Include apiStrict flag into SBE plan cache key
(cherry picked from commit 1b27b42504f05c5c8502b83c4d489230eb85c048)
-rw-r--r--  jstests/noPassthrough/sbe_plan_cache_api_version.js  145
-rw-r--r--  src/mongo/db/query/canonical_query_encoder.cpp  17
-rw-r--r--  src/mongo/db/query/canonical_query_encoder_test.cpp  270
-rw-r--r--  src/mongo/db/query/canonical_query_test_util.cpp  79
-rw-r--r--  src/mongo/db/query/canonical_query_test_util.h  71
-rw-r--r--  src/mongo/db/query/plan_cache_key_info_test.cpp  55
-rw-r--r--  src/mongo/db/query/plan_cache_test.cpp  174
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt  4
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt  80
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt  8
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt  6
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt  30
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt  24
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt  50
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt  12
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt  6
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt  8
-rw-r--r--  src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt  10
18 files changed, 631 insertions, 418 deletions
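
In shell terms, the problem this commit addresses: with apiStrict the planner can see a different set of candidate indexes (sparse indexes, for example, are ignored), so a single plan cache entry cannot safely serve both variants of the same query shape. A minimal sketch of the end-to-end effect, assuming a standalone mongod and the same shell helpers the new jstest relies on:

// Minimal sketch (hypothetical collection and index names): the same $match
// shape must map to two distinct plan cache keys once apiStrict is part of the key.
const conn = MongoRunner.runMongod({});
const coll = conn.getDB("plan_cache_api_version").coll;
assert.commandWorked(coll.insert({a: 1, b: 1}));
assert.commandWorked(coll.createIndex({a: 1}, {name: "a_1_sparse", sparse: true}));

// Returns the planCacheKey reported by explain for the given API parameters.
function planCacheKeyFor(options) {
    const cmd = Object.assign(
        {aggregate: coll.getName(), pipeline: [{$match: {a: 1}}], cursor: {}}, options);
    const explain = coll.runCommand(Object.assign({explain: cmd}, options));
    assert.commandWorked(explain);
    return explain.queryPlanner.planCacheKey;
}

// With this change the two keys differ, forcing separate cache entries.
assert.neq(planCacheKeyFor({apiVersion: '1'}),
           planCacheKeyFor({apiVersion: '1', apiStrict: true}));

MongoRunner.stopMongod(conn);

The new jstest below exercises exactly this, and additionally checks via $planCacheStats that a separate entry exists for each of the two keys.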
diff --git a/jstests/noPassthrough/sbe_plan_cache_api_version.js b/jstests/noPassthrough/sbe_plan_cache_api_version.js
new file mode 100644
index 00000000000..35d77fb9095
--- /dev/null
+++ b/jstests/noPassthrough/sbe_plan_cache_api_version.js
@@ -0,0 +1,145 @@
+/**
+ * Validates the behaviour of the SBE plan cache when the API version was provided to the
+ * aggregate command.
+ */
+
+(function() {
+"use strict";
+
+load("jstests/libs/analyze_plan.js");
+load("jstests/libs/sbe_util.js");
+
+const conn = MongoRunner.runMongod({});
+assert.neq(conn, null, "mongod failed to start");
+const db = conn.getDB("plan_cache_api_version");
+const coll = db.coll;
+coll.drop();
+
+const isSBEEnabled = checkSBEEnabled(db);
+
+assert.commandWorked(coll.insert([{a: 1, b: 1}, {a: 2, b: 2}]));
+
+// Runs the given pipeline with the specified options and returns its plan cache key.
+function runPipeline(pipeline, options, explainOptions = {}) {
+ const command = Object.assign({aggregate: coll.getName(), pipeline, cursor: {}}, options);
+ const result = coll.runCommand(command);
+ assert.commandWorked(result);
+ assert.eq(result.cursor.firstBatch.length, 1, result.cursor.firstBatch);
+
+ const explain = coll.runCommand(Object.assign({explain: command}, explainOptions));
+ assert.commandWorked(explain);
+ assert.neq(explain, null);
+ assert.eq(explain.explainVersion, isSBEEnabled ? "2" : "1", explain);
+ assert.neq(explain.queryPlanner.planCacheKey, null, explain);
+ return explain.queryPlanner.planCacheKey;
+}
+
+// Runs the given 'pipeline' with the API version and returns its plan cache key.
+function runPipelineWithApiVersion(pipeline) {
+ const options = {apiVersion: '1'};
+ return runPipeline(pipeline, options, options);
+}
+
+// Runs the given 'pipeline' with the API version and 'apiStrict: true' and returns its plan cache
+// key.
+function runPipelineWithApiStrict(pipeline) {
+ const options = {apiVersion: '1', apiStrict: true};
+ return runPipeline(pipeline, options, options);
+}
+
+// Asserts that a plan cache entry for the given 'cacheKey' exists in the plan cache and has
+// certain properties set as per provided 'properties' argument.
+function assertPlanCacheEntryExists(cacheKey, properties = {}) {
+ const entries =
+ coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: cacheKey}}]).toArray();
+ assert.eq(entries.length, 1, entries);
+ const entry = entries[0];
+
+ if (isSBEEnabled) {
+ // The version:"2" field indicates that this is an SBE plan cache entry.
+ assert.eq(entry.version, "2", entry);
+ assert.eq(entry.isActive, properties.isActive, entry);
+ assert.eq(entry.isPinned, properties.isPinned, entry);
+ } else {
+ // The version:"1" field indicates that this is an classic plan cache entry.
+ assert.eq(entry.version, "1", entry);
+ assert.eq(entry.isActive, properties.isActive, entry);
+ }
+}
+
+const pipeline = [{$match: {a: 1, b: 1}}];
+
+// Run a set of testcases where each testcase defines a set of indexes on the collection and
+// executes the above pipeline with and without the API strict flag. Assert that the plan cache
+// keys for each of the two queries are different and two different plan cache entries have been
+// created.
+
+const sbeEngineTestcases = [
+ {
+ withApiVersion: {isActive: true, isPinned: true},
+ withApiStrict: {isActive: true, isPinned: true},
+ indexSpecs: []
+ },
+ {
+ withApiVersion: {isActive: true, isPinned: true},
+ withApiStrict: {isActive: true, isPinned: true},
+ indexSpecs: [{keyPattern: {a: 1}, options: {name: "a_1"}}]
+ },
+ {
+ withApiVersion: {isActive: false, isPinned: false},
+ withApiStrict: {isActive: true, isPinned: true},
+ indexSpecs: [
+ {keyPattern: {a: 1}, options: {name: "a_1"}},
+ {keyPattern: {a: 1}, options: {name: "a_1_sparse", sparse: true}}
+ ]
+ }
+];
+
+const classicEngineTestcases = [
+ {
+ withApiVersion: {isActive: false},
+ withApiStrict: {isActive: false},
+ indexSpecs: [
+ {keyPattern: {a: 1}, options: {name: "a_1"}},
+ {keyPattern: {b: 1}, options: {name: "b_1"}}
+ ]
+ },
+ {
+ withApiVersion: {isActive: false},
+ withApiStrict: {isActive: false},
+ indexSpecs: [
+ {keyPattern: {a: 1}, options: {name: "a_1"}},
+ {keyPattern: {a: 1}, options: {name: "a_1_sparse", sparse: true}},
+ {keyPattern: {b: 1}, options: {name: "b_1"}}
+ ]
+ }
+];
+
+const testcases = isSBEEnabled ? sbeEngineTestcases : classicEngineTestcases;
+for (const testcase of testcases) {
+ [true, false].forEach((runWithApiStrictFirst) => {
+ assert.commandWorked(coll.dropIndexes());
+
+ for (const indexSpec of testcase.indexSpecs) {
+ assert.commandWorked(coll.createIndex(indexSpec.keyPattern, indexSpec.options));
+ }
+
+ let planCacheKeyWithApiVersion;
+ let planCacheKeyWithApiStrict;
+
+ if (runWithApiStrictFirst) {
+ planCacheKeyWithApiStrict = runPipelineWithApiStrict(pipeline);
+ planCacheKeyWithApiVersion = runPipelineWithApiVersion(pipeline);
+ } else {
+ planCacheKeyWithApiVersion = runPipelineWithApiVersion(pipeline);
+ planCacheKeyWithApiStrict = runPipelineWithApiStrict(pipeline);
+ }
+
+ assert.neq(planCacheKeyWithApiVersion, planCacheKeyWithApiStrict);
+ assertPlanCacheEntryExists(planCacheKeyWithApiVersion, testcase.withApiVersion);
+ assertPlanCacheEntryExists(planCacheKeyWithApiStrict, testcase.withApiStrict);
+ });
+}
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp
index 4313b6efc2b..d86eb8ab37b 100644
--- a/src/mongo/db/query/canonical_query_encoder.cpp
+++ b/src/mongo/db/query/canonical_query_encoder.cpp
@@ -86,7 +86,7 @@ const char kEncodeProjectionSection = '|';
const char kEncodeProjectionRequirementSeparator = '-';
const char kEncodeRegexFlagsSeparator = '/';
const char kEncodeSortSection = '~';
-const char kEncodeEngineSection = '@';
+const char kEncodeFlagsSection = '@';
const char kEncodePipelineSection = '^';
// These special bytes are used in the encoding of auto-parameterized match expressions in the SBE
@@ -134,7 +134,7 @@ void encodeUserString(StringData s, BuilderType* builder) {
case kEncodeProjectionRequirementSeparator:
case kEncodeRegexFlagsSeparator:
case kEncodeSortSection:
- case kEncodeEngineSection:
+ case kEncodeFlagsSection:
case kEncodeParamMarker:
case kEncodeConstantLiteralMarker:
case kEncodePipelineSection:
@@ -701,7 +701,13 @@ CanonicalQuery::QueryShapeString encode(const CanonicalQuery& cq) {
// This encoding can be removed once the classic query engine reaches EOL and SBE is used
// exclusively for all query execution.
- keyBuilder << kEncodeEngineSection << (cq.getForceClassicEngine() ? "f" : "t");
+ keyBuilder << kEncodeFlagsSection << (cq.getForceClassicEngine() ? "f" : "t");
+
+ // The apiStrict flag can cause the query to see a different set of indexes. For example, all
+ // sparse indexes are ignored when apiStrict is used.
+ const bool apiStrict =
+ cq.getOpCtx() && APIParameters::get(cq.getOpCtx()).getAPIStrict().value_or(false);
+ keyBuilder << (apiStrict ? "t" : "f");
return keyBuilder.str();
}
@@ -1114,6 +1120,11 @@ std::string encodeSBE(const CanonicalQuery& cq) {
bufBuilder.appendStr(strBuilderEncoded, false /* includeEndingNull */);
bufBuilder.appendChar(cq.getForceGenerateRecordId() ? 1 : 0);
bufBuilder.appendChar(cq.isCountLike() ? 1 : 0);
+ // The apiStrict flag can cause the query to see a different set of indexes. For example, all
+ // sparse indexes are ignored when apiStrict is used.
+ const bool apiStrict =
+ cq.getOpCtx() && APIParameters::get(cq.getOpCtx()).getAPIStrict().value_or(false);
+ bufBuilder.appendChar(apiStrict ? 1 : 0);
encodeFindCommandRequest(cq.getFindCommandRequest(), &bufBuilder);
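
Taken together, the two hunks above mean the classic key gains a second character in its '@' flags section, and the SBE key gains one extra byte next to the existing forceGenerateRecordId and isCountLike flags. Read against the golden outputs further down, a classic key such as

    an@ff

decomposes as: 'an' for the trivial (empty) match, '@' opening the flags section, one character derived from getForceClassicEngine(), and the new trailing character, which is 't' only when apiStrict is in effect; the same empty query issued with apiStrict would therefore encode with a trailing 't' (e.g. an@ft).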
diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp
index 12593f56490..cc700609ffa 100644
--- a/src/mongo/db/query/canonical_query_encoder_test.cpp
+++ b/src/mongo/db/query/canonical_query_encoder_test.cpp
@@ -35,6 +35,7 @@
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/pipeline/inner_pipeline_stage_impl.h"
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/canonical_query_test_util.h"
#include "mongo/db/query/plan_cache_key_factory.h"
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/idl/server_parameter_test_util.h"
@@ -47,7 +48,6 @@ namespace {
using std::unique_ptr;
-static const NamespaceString nss("testdb.testcoll");
static const NamespaceString foreignNss("testdb.foreigncoll");
unittest::GoldenTestConfig goldenTestConfig{"src/mongo/db/test_output/query"};
@@ -63,117 +63,119 @@ std::vector<std::unique_ptr<InnerPipelineStageInterface>> parsePipeline(
return stages;
}
-/**
- * Utility functions to create a CanonicalQuery
- */
-unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
- BSONObj sort,
- BSONObj proj,
- BSONObj collation,
- std::unique_ptr<FindCommandRequest> findCommand = nullptr,
- std::vector<BSONObj> pipelineObj = {},
- bool isCountLike = false) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
- if (!findCommand) {
- findCommand = std::make_unique<FindCommandRequest>(nss);
+class CanonicalQueryEncoderTest : public CanonicalQueryTest {
+protected:
+ unique_ptr<CanonicalQuery> canonicalize(
+ OperationContext* opCtx,
+ BSONObj query,
+ BSONObj sort,
+ BSONObj proj,
+ BSONObj collation,
+ std::unique_ptr<FindCommandRequest> findCommand = nullptr,
+ std::vector<BSONObj> pipelineObj = {},
+ bool isCountLike = false) {
+ if (!findCommand) {
+ findCommand = std::make_unique<FindCommandRequest>(nss);
+ }
+ findCommand->setFilter(query.getOwned());
+ findCommand->setSort(sort.getOwned());
+ findCommand->setProjection(proj.getOwned());
+ findCommand->setCollation(collation.getOwned());
+
+ const auto expCtx = make_intrusive<ExpressionContextForTest>(opCtx, nss);
+ expCtx->addResolvedNamespaces({foreignNss});
+ if (!findCommand->getCollation().isEmpty()) {
+ auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
+ ->makeFromBSON(findCommand->getCollation());
+ ASSERT_OK(statusWithCollator.getStatus());
+ expCtx->setCollator(std::move(statusWithCollator.getValue()));
+ }
+ auto pipeline = parsePipeline(expCtx, pipelineObj);
+
+ auto statusWithCQ =
+ CanonicalQuery::canonicalize(opCtx,
+ std::move(findCommand),
+ false,
+ expCtx,
+ ExtensionsCallbackNoop(),
+ MatchExpressionParser::kAllowAllSpecialFeatures,
+ ProjectionPolicies::findProjectionPolicies(),
+ std::move(pipeline),
+ isCountLike);
+ ASSERT_OK(statusWithCQ.getStatus());
+ return std::move(statusWithCQ.getValue());
}
- findCommand->setFilter(query.getOwned());
- findCommand->setSort(sort.getOwned());
- findCommand->setProjection(proj.getOwned());
- findCommand->setCollation(collation.getOwned());
-
- const auto expCtx = make_intrusive<ExpressionContextForTest>(opCtx.get(), nss);
- expCtx->addResolvedNamespaces({foreignNss});
- if (!findCommand->getCollation().isEmpty()) {
- auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())
- ->makeFromBSON(findCommand->getCollation());
- ASSERT_OK(statusWithCollator.getStatus());
- expCtx->setCollator(std::move(statusWithCollator.getValue()));
- }
- auto pipeline = parsePipeline(expCtx, pipelineObj);
-
- auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
- std::move(findCommand),
- false,
- expCtx,
- ExtensionsCallbackNoop(),
- MatchExpressionParser::kAllowAllSpecialFeatures,
- ProjectionPolicies::findProjectionPolicies(),
- std::move(pipeline),
- isCountLike);
- ASSERT_OK(statusWithCQ.getStatus());
- return std::move(statusWithCQ.getValue());
-}
-
-unique_ptr<CanonicalQuery> canonicalize(const char* queryStr) {
- BSONObj queryObj = fromjson(queryStr);
- return canonicalize(queryObj, {}, {}, {});
-}
-
-/**
- * Test functions for computeKey, when no indexes are present. Cache keys are intentionally
- * obfuscated and are meaningful only within the current lifetime of the server process. Users
- * should treat plan cache keys as opaque.
- */
-void testComputeKey(unittest::GoldenTestContext& gctx, const CanonicalQuery& cq) {
- gctx.outStream() << "==== VARIATION: cq=" << cq.toString() << std::endl;
- const auto key = cq.encodeKey();
- gctx.outStream() << key << std::endl;
-}
+ unique_ptr<CanonicalQuery> canonicalize(OperationContext* opCtx, const char* queryStr) {
+ BSONObj queryObj = fromjson(queryStr);
+ return canonicalize(opCtx, queryObj, {}, {}, {});
+ }
-void testComputeKey(unittest::GoldenTestContext& gctx, BSONObj query, BSONObj sort, BSONObj proj) {
- BSONObj collation;
- gctx.outStream() << "==== VARIATION: query=" << query << ", sort=" << sort << ", proj=" << proj
- << std::endl;
- unique_ptr<CanonicalQuery> cq(canonicalize(query, sort, proj, collation));
- const auto key = cq->encodeKey();
- gctx.outStream() << key << std::endl;
-}
+ /**
+ * Test functions for computeKey, when no indexes are present. Cache keys are intentionally
+ * obfuscated and are meaningful only within the current lifetime of the server process. Users
+ * should treat plan cache keys as opaque.
+ */
+ void testComputeKey(unittest::GoldenTestContext& gctx, const CanonicalQuery& cq) {
+ gctx.outStream() << "==== VARIATION: cq=" << cq.toString() << std::endl;
+ const auto key = cq.encodeKey();
+ gctx.outStream() << key << std::endl;
+ }
-void testComputeKey(unittest::GoldenTestContext& gctx,
- const char* queryStr,
- const char* sortStr,
- const char* projStr) {
- testComputeKey(gctx, fromjson(queryStr), fromjson(sortStr), fromjson(projStr));
-}
+ void testComputeKey(unittest::GoldenTestContext& gctx,
+ BSONObj query,
+ BSONObj sort,
+ BSONObj proj) {
+ BSONObj collation;
+ gctx.outStream() << "==== VARIATION: query=" << query << ", sort=" << sort
+ << ", proj=" << proj << std::endl;
+ unique_ptr<CanonicalQuery> cq(canonicalize(opCtx(), query, sort, proj, collation));
+ const auto key = cq->encodeKey();
+ gctx.outStream() << key << std::endl;
+ }
-void testComputeSBEKey(unittest::GoldenTestContext& gctx,
- const char* queryStr,
- const char* sortStr,
- const char* projStr,
- std::unique_ptr<FindCommandRequest> findCommand = nullptr,
- std::vector<BSONObj> pipelineObj = {},
- bool isCountLike = false) {
- auto& stream = gctx.outStream();
- stream << "==== VARIATION: sbe, query=" << queryStr << ", sort=" << sortStr
- << ", proj=" << projStr;
- if (findCommand) {
- stream << ", allowDiskUse=" << findCommand->getAllowDiskUse()
- << ", returnKey=" << findCommand->getReturnKey()
- << ", requestResumeToken=" << findCommand->getRequestResumeToken();
+ void testComputeKey(unittest::GoldenTestContext& gctx,
+ const char* queryStr,
+ const char* sortStr,
+ const char* projStr) {
+ testComputeKey(gctx, fromjson(queryStr), fromjson(sortStr), fromjson(projStr));
}
- if (isCountLike) {
- stream << ", isCountLike=true";
+
+ void testComputeSBEKey(unittest::GoldenTestContext& gctx,
+ const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ std::unique_ptr<FindCommandRequest> findCommand = nullptr,
+ std::vector<BSONObj> pipelineObj = {},
+ bool isCountLike = false) {
+ auto& stream = gctx.outStream();
+ stream << "==== VARIATION: sbe, query=" << queryStr << ", sort=" << sortStr
+ << ", proj=" << projStr;
+ if (findCommand) {
+ stream << ", allowDiskUse=" << findCommand->getAllowDiskUse()
+ << ", returnKey=" << findCommand->getReturnKey()
+ << ", requestResumeToken=" << findCommand->getRequestResumeToken();
+ }
+ if (isCountLike) {
+ stream << ", isCountLike=true";
+ }
+ stream << std::endl;
+ BSONObj collation;
+ unique_ptr<CanonicalQuery> cq(canonicalize(opCtx(),
+ fromjson(queryStr),
+ fromjson(sortStr),
+ fromjson(projStr),
+ collation,
+ std::move(findCommand),
+ std::move(pipelineObj),
+ isCountLike));
+ cq->setSbeCompatible(true);
+ const auto key = canonical_query_encoder::encodeSBE(*cq);
+ gctx.outStream() << key << std::endl;
}
- stream << std::endl;
- BSONObj collation;
- unique_ptr<CanonicalQuery> cq(canonicalize(fromjson(queryStr),
- fromjson(sortStr),
- fromjson(projStr),
- collation,
- std::move(findCommand),
- std::move(pipelineObj),
- isCountLike));
- cq->setSbeCompatible(true);
- const auto key = canonical_query_encoder::encodeSBE(*cq);
- gctx.outStream() << key << std::endl;
-}
+};
-TEST(CanonicalQueryEncoderTest, ComputeKey) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKey) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// Generated cache keys should be treated as opaque to the user.
@@ -250,7 +252,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKey) {
testComputeKey(gctx, "{$or: [{a: 1}]}", "{}", "{'a.$': 1}");
}
-TEST(CanonicalQueryEncoderTest, EncodeNotEqualNullPredicates) {
+TEST_F(CanonicalQueryEncoderTest, EncodeNotEqualNullPredicates) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// The computed key depends on which execution engine is enabled. As such, we disable SBE for
// this test so that the test doesn't break should the default value of
@@ -270,7 +272,7 @@ TEST(CanonicalQueryEncoderTest, EncodeNotEqualNullPredicates) {
// Delimiters found in user field names or non-standard projection field values
// must be escaped.
-TEST(CanonicalQueryEncoderTest, ComputeKeyEscaped) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeyEscaped) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// The computed key depends on which execution engine is enabled. As such, we disable SBE for
// this test so that the test doesn't break should the default value of
@@ -292,23 +294,23 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyEscaped) {
// Cache keys for $geoWithin queries with legacy and GeoJSON coordinates should
// not be the same.
-TEST(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeyGeoWithin) {
// Legacy coordinates.
- unique_ptr<CanonicalQuery> cqLegacy(
- canonicalize("{a: {$geoWithin: "
- "{$box: [[-180, -90], [180, 90]]}}}"));
+ unique_ptr<CanonicalQuery> cqLegacy(canonicalize(opCtx(),
+ "{a: {$geoWithin: "
+ "{$box: [[-180, -90], [180, 90]]}}}"));
// GeoJSON coordinates.
- unique_ptr<CanonicalQuery> cqNew(
- canonicalize("{a: {$geoWithin: "
- "{$geometry: {type: 'Polygon', coordinates: "
- "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
+ unique_ptr<CanonicalQuery> cqNew(canonicalize(opCtx(),
+ "{a: {$geoWithin: "
+ "{$geometry: {type: 'Polygon', coordinates: "
+ "[[[0, 0], [0, 90], [90, 0], [0, 0]]]}}}}"));
ASSERT_NOT_EQUALS(canonical_query_encoder::encode(*cqLegacy),
canonical_query_encoder::encode(*cqNew));
}
// GEO_NEAR cache keys should include information on geometry and CRS in addition
// to the match type and field name.
-TEST(CanonicalQueryEncoderTest, ComputeKeyGeoNear) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeyGeoNear) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// The computed key depends on which execution engine is enabled. As such, we disable SBE for
// this test so that the test doesn't break should the default value of
@@ -325,7 +327,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyGeoNear) {
"{}");
}
-TEST(CanonicalQueryEncoderTest, ComputeKeyRegexDependsOnFlags) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeyRegexDependsOnFlags) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// The computed key depends on which execution engine is enabled. As such, we enable SBE for
// this test in order to ensure that we have coverage for both SBE and the classic engine.
@@ -360,7 +362,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyRegexDependsOnFlags) {
testComputeKey(gctx, "{a: /abc/im}", "{}", "{}");
}
-TEST(CanonicalQueryEncoderTest, ComputeKeyMatchInDependsOnPresenceOfRegexAndFlags) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeyMatchInDependsOnPresenceOfRegexAndFlags) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// The computed key depends on which execution engine is enabled. As such, we disable SBE for
// this test so that the test doesn't break should the default value of
@@ -406,7 +408,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeyMatchInDependsOnPresenceOfRegexAndFlag
testComputeKey(gctx, "{a: {$not: {$in: [/foo/i]}}}", "{}", "{}");
}
-TEST(CanonicalQueryEncoderTest, CheckCollationIsEncoded) {
+TEST_F(CanonicalQueryEncoderTest, CheckCollationIsEncoded) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// The computed key depends on which execution engine is enabled. As such, we disable SBE for
// this test so that the test doesn't break should the default value of
@@ -415,12 +417,12 @@ TEST(CanonicalQueryEncoderTest, CheckCollationIsEncoded) {
"forceClassicEngine");
unique_ptr<CanonicalQuery> cq(canonicalize(
- fromjson("{a: 1, b: 1}"), {}, {}, fromjson("{locale: 'mock_reverse_string'}")));
+ opCtx(), fromjson("{a: 1, b: 1}"), {}, {}, fromjson("{locale: 'mock_reverse_string'}")));
testComputeKey(gctx, *cq);
}
-TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeySBE) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// Generated cache keys should be treated as opaque to the user.
@@ -493,7 +495,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBE) {
testComputeSBEKey(gctx, "{a: 1}", "{}", "{}", std::move(findCommand));
}
-TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
@@ -523,7 +525,7 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithPipeline) {
{getLookupBson("a", "b", "as"), getLookupBson("a1", "b1", "as1")});
}
-TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithReadConcern) {
+TEST_F(CanonicalQueryEncoderTest, ComputeKeySBEWithReadConcern) {
unittest::GoldenTestContext gctx(&goldenTestConfig);
// SBE must be enabled in order to generate SBE plan cache keys.
RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
@@ -546,5 +548,29 @@ TEST(CanonicalQueryEncoderTest, ComputeKeySBEWithReadConcern) {
testComputeSBEKey(gctx, "{a: 1}", "{a: 1}", "{}", std::move(findCommand));
}
+TEST_F(CanonicalQueryEncoderTest, ComputeKeyWithApiStrict) {
+ unittest::GoldenTestContext gctx(&goldenTestConfig);
+ {
+ RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
+ "forceClassicEngine");
+ APIParameters::get(opCtx()).setAPIStrict(false);
+ testComputeKey(gctx, "{}", "{}", "{}");
+
+ APIParameters::get(opCtx()).setAPIStrict(true);
+ testComputeKey(gctx, "{}", "{}", "{}");
+ }
+
+ {
+ RAIIServerParameterControllerForTest controllerSBE("internalQueryFrameworkControl",
+ "trySbeEngine");
+
+ APIParameters::get(opCtx()).setAPIStrict(false);
+ testComputeSBEKey(gctx, "{}", "{}", "{}");
+
+ APIParameters::get(opCtx()).setAPIStrict(true);
+ testComputeSBEKey(gctx, "{}", "{}", "{}");
+ }
+}
+
} // namespace
} // namespace mongo
diff --git a/src/mongo/db/query/canonical_query_test_util.cpp b/src/mongo/db/query/canonical_query_test_util.cpp
index 7203c7e9992..ff83ae0df14 100644
--- a/src/mongo/db/query/canonical_query_test_util.cpp
+++ b/src/mongo/db/query/canonical_query_test_util.cpp
@@ -29,22 +29,18 @@
#include "mongo/db/query/canonical_query_test_util.h"
-#include "mongo/db/query/query_test_service_context.h"
-#include "mongo/unittest/unittest.h"
-
namespace mongo {
+const NamespaceString CanonicalQueryTest::nss("test.collection");
+
/**
* Utility functions to create a CanonicalQuery
*/
-std::unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
+std::unique_ptr<CanonicalQuery> CanonicalQueryTest::canonicalize(const BSONObj& queryObj) {
auto findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setFilter(queryObj);
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
+ CanonicalQuery::canonicalize(opCtx(),
std::move(findCommand),
false,
expCtx,
@@ -54,18 +50,15 @@ std::unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj) {
return std::move(statusWithCQ.getValue());
}
-std::unique_ptr<CanonicalQuery> canonicalize(StringData queryStr) {
+std::unique_ptr<CanonicalQuery> CanonicalQueryTest::canonicalize(StringData queryStr) {
BSONObj queryObj = fromjson(queryStr.toString());
return canonicalize(queryObj);
}
-std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
- BSONObj sort,
- BSONObj proj,
- BSONObj collation) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
+std::unique_ptr<CanonicalQuery> CanonicalQueryTest::canonicalize(BSONObj query,
+ BSONObj sort,
+ BSONObj proj,
+ BSONObj collation) {
auto findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setFilter(query);
findCommand->setSort(sort);
@@ -73,7 +66,7 @@ std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
findCommand->setCollation(collation);
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
+ CanonicalQuery::canonicalize(opCtx(),
std::move(findCommand),
false,
expCtx,
@@ -83,25 +76,22 @@ std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
return std::move(statusWithCQ.getValue());
}
-std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- const char* collationStr) {
+std::unique_ptr<CanonicalQuery> CanonicalQueryTest::canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* collationStr) {
return canonicalize(
fromjson(queryStr), fromjson(sortStr), fromjson(projStr), fromjson(collationStr));
}
-std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- long long skip,
- long long limit,
- const char* hintStr,
- const char* minStr,
- const char* maxStr) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
+std::unique_ptr<CanonicalQuery> CanonicalQueryTest::canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr) {
auto findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setFilter(fromjson(queryStr));
findCommand->setSort(fromjson(sortStr));
@@ -117,7 +107,7 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
findCommand->setMax(fromjson(maxStr));
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
+ CanonicalQuery::canonicalize(opCtx(),
std::move(findCommand),
false,
expCtx,
@@ -127,18 +117,15 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
return std::move(statusWithCQ.getValue());
}
-std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- long long skip,
- long long limit,
- const char* hintStr,
- const char* minStr,
- const char* maxStr,
- bool explain) {
- QueryTestServiceContext serviceContext;
- auto opCtx = serviceContext.makeOperationContext();
-
+std::unique_ptr<CanonicalQuery> CanonicalQueryTest::canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool explain) {
auto findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setFilter(fromjson(queryStr));
findCommand->setSort(fromjson(sortStr));
@@ -154,7 +141,7 @@ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
findCommand->setMax(fromjson(maxStr));
const boost::intrusive_ptr<ExpressionContext> expCtx;
auto statusWithCQ =
- CanonicalQuery::canonicalize(opCtx.get(),
+ CanonicalQuery::canonicalize(opCtx(),
std::move(findCommand),
explain,
expCtx,
diff --git a/src/mongo/db/query/canonical_query_test_util.h b/src/mongo/db/query/canonical_query_test_util.h
index 17dedf7045f..bb69978711c 100644
--- a/src/mongo/db/query/canonical_query_test_util.h
+++ b/src/mongo/db/query/canonical_query_test_util.h
@@ -28,36 +28,51 @@
*/
#include "mongo/db/query/canonical_query.h"
+#include "mongo/db/query/query_test_service_context.h"
+#include "mongo/unittest/unittest.h"
namespace mongo {
+class CanonicalQueryTest : public unittest::Test {
+public:
+ CanonicalQueryTest() : _opCtx(_serviceContext.makeOperationContext()) {}
-const NamespaceString nss("test.collection");
+protected:
+ OperationContext* opCtx() const {
+ return _opCtx.get();
+ }
-std::unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj);
-std::unique_ptr<CanonicalQuery> canonicalize(StringData queryStr);
-std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
- BSONObj sort,
- BSONObj proj,
- BSONObj collation);
-std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- const char* collationStr);
-std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- long long skip,
- long long limit,
- const char* hintStr,
- const char* minStr,
- const char* maxStr);
-std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
- const char* sortStr,
- const char* projStr,
- long long skip,
- long long limit,
- const char* hintStr,
- const char* minStr,
- const char* maxStr,
- bool explain);
+ std::unique_ptr<CanonicalQuery> canonicalize(const BSONObj& queryObj);
+ std::unique_ptr<CanonicalQuery> canonicalize(StringData queryStr);
+ std::unique_ptr<CanonicalQuery> canonicalize(BSONObj query,
+ BSONObj sort,
+ BSONObj proj,
+ BSONObj collation);
+ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ const char* collationStr);
+ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr);
+ std::unique_ptr<CanonicalQuery> canonicalize(const char* queryStr,
+ const char* sortStr,
+ const char* projStr,
+ long long skip,
+ long long limit,
+ const char* hintStr,
+ const char* minStr,
+ const char* maxStr,
+ bool explain);
+
+ static const NamespaceString nss;
+
+private:
+ QueryTestServiceContext _serviceContext;
+ ServiceContext::UniqueOperationContext _opCtx;
+};
} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache_key_info_test.cpp b/src/mongo/db/query/plan_cache_key_info_test.cpp
index 824fdf9dc37..86d9a16309c 100644
--- a/src/mongo/db/query/plan_cache_key_info_test.cpp
+++ b/src/mongo/db/query/plan_cache_key_info_test.cpp
@@ -50,6 +50,8 @@ std::ostream& operator<<(std::ostream& stream, const PlanCacheKeyInfo& key) {
}
namespace {
+using PlanCacheKeyInfoTest = CanonicalQueryTest;
+
PlanCacheKeyInfo makeKey(const CanonicalQuery& cq,
const std::vector<CoreIndexInfo>& indexCores = {}) {
PlanCacheIndexabilityState indexabilityState;
@@ -108,20 +110,25 @@ void assertPlanCacheKeysUnequalDueToForceClassicEngineValue(const PlanCacheKeyIn
auto bStablePart = b.getQueryShape();
ASSERT_EQ(aUnstablePart, bUnstablePart);
- // The last character of the stable part encodes the engine that uses this PlanCacheKey. So the
- // stable parts except for the last character should be identical.
- ASSERT_EQ(aStablePart.substr(0, aStablePart.size() - 1),
- bStablePart.substr(0, bStablePart.size() - 1));
+ // The last 2 characters (plus separator) of the stable part encode the engine that uses this
+ // PlanCacheKey and whether apiStrict was used. So the stable parts, except for the last two
+ // characters, should be identical.
+ ASSERT_EQ(aStablePart.substr(0, aStablePart.size() - 2),
+ bStablePart.substr(0, bStablePart.size() - 2));
- // Should have at least 1 byte to represent whether we must use the classic engine.
- ASSERT_GTE(aStablePart.size(), 1);
+ // Should have at least 2 bytes to represent whether we must use the classic engine and stable
+ // API.
+ ASSERT_GTE(aStablePart.size(), 2);
// The indexability discriminators should match.
ASSERT_EQ(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators());
- // The stable parts should not match because of the last character.
+ // The stable parts should not match because of the second character from the back, which
+ // encodes the engine type.
ASSERT_NE(aStablePart, bStablePart);
- ASSERT_NE(aStablePart.back(), bStablePart.back());
+ ASSERT_NE(aStablePart[aStablePart.size() - 2], bStablePart[bStablePart.size() - 2]);
+ // Ensure that the apiStrict values are equal.
+ ASSERT_EQ(aStablePart.back(), bStablePart.back());
}
/**
@@ -141,7 +148,7 @@ void assertPlanCacheKeysUnequalDueToDiscriminators(const PlanCacheKeyInfo& a,
// When a sparse index is present, computeKey() should generate different keys depending on
// whether or not the predicates in the given query can use the index.
-TEST(PlanCacheKeyInfoTest, ComputeKeySparseIndex) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeySparseIndex) {
const auto keyPattern = BSON("a" << 1);
const std::vector<CoreIndexInfo> indexCores = {
CoreIndexInfo(keyPattern,
@@ -170,7 +177,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeySparseIndex) {
// When a partial index is present, computeKey() should generate different keys depending on
// whether or not the predicates in the given query "match" the predicates in the partial index
// filter.
-TEST(PlanCacheKeyInfoTest, ComputeKeyPartialIndex) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeyPartialIndex) {
BSONObj filterObj = BSON("f" << BSON("$gt" << 0));
unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
@@ -195,7 +202,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyPartialIndex) {
}
// Query shapes should get the same plan cache key if they have the same collation indexability.
-TEST(PlanCacheKeyInfoTest, ComputeKeyCollationIndex) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeyCollationIndex) {
CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
const auto keyPattern = BSON("a" << 1);
@@ -262,7 +269,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyCollationIndex) {
makeKey(*inContainsStringHasCollation, indexCores).getIndexabilityDiscriminators());
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardIndex) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeyWildcardIndex) {
auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
@@ -322,7 +329,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardIndex) {
"<0><0>");
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyObj) {
auto entryProjUpdatePair = makeWildcardUpdate(BSON("a.$**" << 1));
const std::vector<CoreIndexInfo> indexCores = {entryProjUpdatePair.first};
@@ -344,7 +351,8 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardIndexDiscriminatesEqualityToEmptyOb
ASSERT_EQ(makeKey(*inWithEmptyObj, indexCores).getIndexabilityDiscriminators(), "<1>");
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilterExpression) {
+TEST_F(PlanCacheKeyInfoTest,
+ ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartialFilterExpression) {
BSONObj filterObj = BSON("x" << BSON("$gt" << 0));
std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
@@ -412,7 +420,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyBasedOnPartia
}
}
-TEST(PlanCacheKeyInfoTest, DifferentQueryEngines) {
+TEST_F(PlanCacheKeyInfoTest, DifferentQueryEngines) {
const auto keyPattern = BSON("a" << 1);
const std::vector<CoreIndexInfo> indexCores = {
CoreIndexInfo(keyPattern,
@@ -438,7 +446,8 @@ TEST(PlanCacheKeyInfoTest, DifferentQueryEngines) {
assertPlanCacheKeysUnequalDueToForceClassicEngineValue(classicEngineKey, noClassicEngineKey);
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAndExpression) {
+TEST_F(PlanCacheKeyInfoTest,
+ ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAndExpression) {
// Partial filter is an AND of multiple conditions.
BSONObj filterObj = BSON("x" << BSON("$gt" << 0) << "y" << BSON("$gt" << 0));
std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
@@ -466,7 +475,8 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFi
}
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcardProjection) {
+TEST_F(PlanCacheKeyInfoTest,
+ ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndWildcardProjection) {
BSONObj filterObj = BSON("x" << BSON("$gt" << 0));
std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
@@ -498,7 +508,8 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyDiscriminatesCorrectlyWithPartialFilterAndW
}
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnNestedField) {
+TEST_F(PlanCacheKeyInfoTest,
+ ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterOnNestedField) {
BSONObj filterObj = BSON("x.y" << BSON("$gt" << 0));
std::unique_ptr<MatchExpression> filterExpr(parseMatchExpression(filterObj));
@@ -523,7 +534,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFi
}
}
-TEST(PlanCacheKeyInfoTest, StableKeyDoesNotChangeAcrossIndexCreation) {
+TEST_F(PlanCacheKeyInfoTest, StableKeyDoesNotChangeAcrossIndexCreation) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 0}}"));
const auto preIndexKey = makeKey(*cq);
const auto preIndexStableKey = preIndexKey.getQueryShape();
@@ -544,7 +555,7 @@ TEST(PlanCacheKeyInfoTest, StableKeyDoesNotChangeAcrossIndexCreation) {
ASSERT_EQ(postIndexKey.getIndexabilityDiscriminators(), "<1>");
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyNotEqualsArray) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeyNotEqualsArray) {
unique_ptr<CanonicalQuery> cqNeArray(canonicalize("{a: {$ne: [1]}}"));
unique_ptr<CanonicalQuery> cqNeScalar(canonicalize("{a: {$ne: 123}}"));
@@ -577,7 +588,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyNotEqualsArray) {
ASSERT_EQ(withIndexNeArrayKey.getIndexabilityDiscriminators(), "<0><1>");
}
-TEST(PlanCacheKeyInfoTest, ComputeKeyNinArray) {
+TEST_F(PlanCacheKeyInfoTest, ComputeKeyNinArray) {
unique_ptr<CanonicalQuery> cqNinArray(canonicalize("{a: {$nin: [123, [1]]}}"));
unique_ptr<CanonicalQuery> cqNinScalar(canonicalize("{a: {$nin: [123, 456]}}"));
@@ -618,7 +629,7 @@ TEST(PlanCacheKeyInfoTest, ComputeKeyNinArray) {
// Whether the discriminator referred to the first not-eq node or the second would be
// ambiguous. This would make it possible for two queries with different shapes (and different
// plans) to get the same plan cache key. We test that this does not happen for a simple example.
-TEST(PlanCacheKeyInfoTest, PlanCacheKeyCollision) {
+TEST_F(PlanCacheKeyInfoTest, PlanCacheKeyCollision) {
unique_ptr<CanonicalQuery> cqNeA(canonicalize("{$or: [{a: {$ne: 5}}, {a: {$ne: [12]}}]}"));
unique_ptr<CanonicalQuery> cqNeB(canonicalize("{$or: [{a: {$ne: [12]}}, {a: {$ne: 5}}]}"));
diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp
index 4897bea2115..a60bf12883c 100644
--- a/src/mongo/db/query/plan_cache_test.cpp
+++ b/src/mongo/db/query/plan_cache_test.cpp
@@ -74,6 +74,43 @@ using std::string;
using std::unique_ptr;
using std::vector;
+class PlanCacheTest : public CanonicalQueryTest {
+protected:
+ /**
+ * Test functions for shouldCacheQuery.
+ *
+ * Use these functions to assert which categories of canonicalized queries are suitable for
+ * inclusion in the plan cache.
+ */
+ void assertShouldCacheQuery(const CanonicalQuery& query) {
+ if (shouldCacheQuery(query)) {
+ return;
+ }
+ str::stream ss;
+ ss << "Canonical query should be cacheable: " << query.toString();
+ FAIL(ss);
+ }
+
+ void assertShouldNotCacheQuery(const CanonicalQuery& query) {
+ if (!shouldCacheQuery(query)) {
+ return;
+ }
+ str::stream ss;
+ ss << "Canonical query should not be cacheable: " << query.toString();
+ FAIL(ss);
+ }
+
+ void assertShouldNotCacheQuery(const BSONObj& query) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(query));
+ assertShouldNotCacheQuery(*cq);
+ }
+
+ void assertShouldNotCacheQuery(const char* queryStr) {
+ unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
+ assertShouldNotCacheQuery(*cq);
+ }
+};
+
PlanCacheKey makeKey(const CanonicalQuery& cq, const std::vector<CoreIndexInfo>& indexCores = {}) {
PlanCacheIndexabilityState indexabilityState;
indexabilityState.updateDiscriminators(indexCores);
@@ -84,10 +121,12 @@ PlanCacheKey makeKey(const CanonicalQuery& cq, const std::vector<CoreIndexInfo>&
return {PlanCacheKeyInfo{cq.encodeKey(), indexabilityKeyBuilder.str()}};
}
-// Helper which constructs a $** IndexEntry and returns it along with an owned ProjectionExecutor.
-// The latter simulates the ProjectionExecutor which, during normal operation, is owned and
-// maintained by the $** index's IndexAccessMethod, and is required because the plan cache will
-// obtain unowned pointers to it.
+/**
+ * Helper which constructs a $** IndexEntry and returns it along with an owned ProjectionExecutor.
+ * The latter simulates the ProjectionExecutor which, during normal operation, is owned and
+ * maintained by the $** index's IndexAccessMethod, and is required because the plan cache will
+ * obtain unowned pointers to it.
+ */
std::pair<IndexEntry, std::unique_ptr<WildcardProjection>> makeWildcardEntry(BSONObj keyPattern) {
auto wcProj = std::make_unique<WildcardProjection>(
WildcardKeyGenerator::createProjectionExecutor(keyPattern, {}));
@@ -108,11 +147,11 @@ std::pair<IndexEntry, std::unique_ptr<WildcardProjection>> makeWildcardEntry(BSO
}
//
-// Tests for CachedSolution
+// Tests for CachedSolution.
//
/**
- * Utility function to create a PlanRankingDecision
+ * Utility function to create a PlanRankingDecision.
*/
std::unique_ptr<plan_ranker::PlanRankingDecision> createDecision(size_t numPlans,
size_t works = 0) {
@@ -131,40 +170,6 @@ std::unique_ptr<plan_ranker::PlanRankingDecision> createDecision(size_t numPlans
return why;
}
-/**
- * Test functions for shouldCacheQuery
- * Use these functions to assert which categories
- * of canonicalized queries are suitable for inclusion
- * in the planner cache.
- */
-void assertShouldCacheQuery(const CanonicalQuery& query) {
- if (shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should be cacheable: " << query.toString();
- FAIL(ss);
-}
-
-void assertShouldNotCacheQuery(const CanonicalQuery& query) {
- if (!shouldCacheQuery(query)) {
- return;
- }
- str::stream ss;
- ss << "Canonical query should not be cacheable: " << query.toString();
- FAIL(ss);
-}
-
-void assertShouldNotCacheQuery(const BSONObj& query) {
- unique_ptr<CanonicalQuery> cq(canonicalize(query));
- assertShouldNotCacheQuery(*cq);
-}
-
-void assertShouldNotCacheQuery(const char* queryStr) {
- unique_ptr<CanonicalQuery> cq(canonicalize(queryStr));
- assertShouldNotCacheQuery(*cq);
-}
-
std::unique_ptr<QuerySolution> getQuerySolutionForCaching() {
std::unique_ptr<QuerySolution> qs = std::make_unique<QuerySolution>();
qs->cacheData = std::make_unique<SolutionCacheData>();
@@ -173,69 +178,73 @@ std::unique_ptr<QuerySolution> getQuerySolutionForCaching() {
}
/**
- * Cacheable queries
- * These queries will be added to the cache with run-time statistics
- * and can be managed with the cache DB commands.
+ * Cacheable queries.
+ *
+ * These queries will be added to the cache with run-time statistics and can be managed with the
+ * cache DB commands.
*/
-TEST(PlanCacheTest, ShouldCacheQueryBasic) {
+TEST_F(PlanCacheTest, ShouldCacheQueryBasic) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
assertShouldCacheQuery(*cq);
}
-TEST(PlanCacheTest, ShouldCacheQuerySort) {
+TEST_F(PlanCacheTest, ShouldCacheQuerySort) {
unique_ptr<CanonicalQuery> cq(canonicalize("{}", "{a: -1}", "{_id: 0, a: 1}", "{}"));
assertShouldCacheQuery(*cq);
}
-/*
+/**
* Non-cacheable queries.
+ *
 * These queries will be sent through the planning process every time.
*/
/**
- * Collection scan
+ * Collection scan.
+ *
* This should normally be handled by the IDHack runner.
*/
-TEST(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
+TEST_F(PlanCacheTest, ShouldNotCacheQueryCollectionScan) {
unique_ptr<CanonicalQuery> cq(canonicalize("{}"));
assertShouldNotCacheQuery(*cq);
}
/**
- * Hint
+ * Hint.
+ *
* A hinted query implies strong user preference for a particular index.
* Therefore, not much point in caching.
*/
-TEST(PlanCacheTest, ShouldNotCacheQueryWithHint) {
+TEST_F(PlanCacheTest, ShouldNotCacheQueryWithHint) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1, b: 1}", "{}", "{}"));
assertShouldNotCacheQuery(*cq);
}
/**
- * Min queries are a specialized case of hinted queries
+ * Min queries are a specialized case of hinted queries.
*/
-TEST(PlanCacheTest, ShouldNotCacheQueryWithMin) {
+TEST_F(PlanCacheTest, ShouldNotCacheQueryWithMin) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1}", "{a: 100}", "{}"));
assertShouldNotCacheQuery(*cq);
}
/**
- * Max queries are non-cacheable for the same reasons as min queries.
+ * Max queries are non-cacheable for the same reasons as min queries.
*/
-TEST(PlanCacheTest, ShouldNotCacheQueryWithMax) {
+TEST_F(PlanCacheTest, ShouldNotCacheQueryWithMax) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: 1}", "{}", "{}", 0, 0, "{a: 1}", "{}", "{a: 100}"));
assertShouldNotCacheQuery(*cq);
}
/**
- * $geoWithin queries with legacy coordinates are cacheable as long as
- * the planner is able to come up with a cacheable solution.
+ * $geoWithin queries with legacy coordinates are cacheable as long as the planner is able to come
+ * up with a cacheable solution.
*/
-TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
+TEST_F(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: {$geoWithin: "
"{$box: [[-180, -90], [180, 90]]}}}"));
@@ -245,7 +254,7 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyCoordinates) {
/**
* $geoWithin queries with GeoJSON coordinates are supported by the index bounds builder.
*/
-TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
+TEST_F(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: {$geoWithin: "
"{$geometry: {type: 'Polygon', coordinates: "
@@ -256,7 +265,7 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinJSONCoordinates) {
/**
* $geoWithin queries with both legacy and GeoJSON coordinates are cacheable.
*/
-TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
+TEST_F(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{$or: [{a: {$geoWithin: {$geometry: {type: 'Polygon', "
"coordinates: [[[0, 0], [0, 90], "
@@ -268,7 +277,7 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoWithinLegacyAndJSONCoordinates) {
/**
* $geoIntersects queries are always cacheable because they support GeoJSON coordinates only.
*/
-TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
+TEST_F(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: {$geoIntersects: "
"{$geometry: {type: 'Point', coordinates: "
@@ -277,10 +286,10 @@ TEST(PlanCacheTest, ShouldCacheQueryWithGeoIntersects) {
}
/**
- * $geoNear queries are cacheable because we are able to distinguish
- * between flat and spherical queries.
+ * $geoNear queries are cacheable because we are able to distinguish between flat and spherical
+ * queries.
*/
-TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
+TEST_F(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
unique_ptr<CanonicalQuery> cq(
canonicalize("{a: {$geoNear: {$geometry: {type: 'Point',"
"coordinates: [0,0]}, $maxDistance:100}}}"));
@@ -288,11 +297,10 @@ TEST(PlanCacheTest, ShouldNotCacheQueryWithGeoNear) {
}
/**
- * Explain queries are not-cacheable because of allPlans cannot
- * be accurately generated from stale cached stats in the plan cache for
- * non-winning plans.
+ * Explain queries are not cacheable because allPlans cannot be accurately generated from stale
+ * cached stats in the plan cache for non-winning plans.
*/
-TEST(PlanCacheTest, ShouldNotCacheQueryExplain) {
+TEST_F(PlanCacheTest, ShouldNotCacheQueryExplain) {
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}",
"{}",
"{}",
@@ -324,7 +332,7 @@ void addCacheEntryForShape(const CanonicalQuery& cq, PlanCache* planCache) {
ASSERT_OK(planCache->set(makeKey(cq), qs->cacheData->clone(), *decision, Date_t{}, &callbacks));
}
-TEST(PlanCacheTest, InactiveEntriesDisabled) {
+TEST_F(PlanCacheTest, InactiveEntriesDisabled) {
// Set the global flag for disabling active entries.
internalQueryCacheDisableInactiveEntries.store(true);
ON_BLOCK_EXIT([] { internalQueryCacheDisableInactiveEntries.store(false); });
@@ -355,7 +363,7 @@ TEST(PlanCacheTest, InactiveEntriesDisabled) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-TEST(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
+TEST_F(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
// Use a tiny cache size.
const size_t kCacheSize = 2;
PlanCache planCache(kCacheSize);
@@ -391,7 +399,7 @@ TEST(PlanCacheTest, PlanCacheLRUPolicyRemovesInactiveEntries) {
ASSERT_EQ(planCache.get(keyC).state, PlanCache::CacheEntryState::kPresentInactive);
}
-TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
+TEST_F(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -412,7 +420,7 @@ TEST(PlanCacheTest, PlanCacheRemoveDeletesInactiveEntries) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
+TEST_F(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -433,7 +441,7 @@ TEST(PlanCacheTest, PlanCacheFlushDeletesInactiveEntries) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-TEST(PlanCacheTest, AddActiveCacheEntry) {
+TEST_F(PlanCacheTest, AddActiveCacheEntry) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -464,7 +472,7 @@ TEST(PlanCacheTest, AddActiveCacheEntry) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-TEST(PlanCacheTest, WorksValueIncreases) {
+TEST_F(PlanCacheTest, WorksValueIncreases) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -538,7 +546,7 @@ TEST(PlanCacheTest, WorksValueIncreases) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
+TEST_F(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
// Will use a very small growth coefficient.
const double kWorksCoeff = 1.10;
@@ -582,7 +590,7 @@ TEST(PlanCacheTest, WorksValueIncreasesByAtLeastOne) {
ASSERT_EQ(planCache.get(key).state, PlanCache::CacheEntryState::kNotPresent);
}
-TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
+TEST_F(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -626,7 +634,7 @@ TEST(PlanCacheTest, SetIsNoopWhenNewEntryIsWorse) {
ASSERT_EQ(entry->works.value(), 20U);
}
-TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
+TEST_F(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -669,7 +677,7 @@ TEST(PlanCacheTest, SetOverwritesWhenNewEntryIsBetter) {
ASSERT_EQ(entry->works.value(), 10U);
}
-TEST(PlanCacheTest, DeactivateCacheEntry) {
+TEST_F(PlanCacheTest, DeactivateCacheEntry) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -709,7 +717,7 @@ TEST(PlanCacheTest, DeactivateCacheEntry) {
ASSERT_EQ(entry->works.value(), 20U);
}
-TEST(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
+TEST_F(PlanCacheTest, GetMatchingStatsMatchesAndSerializesCorrectly) {
PlanCache planCache(5000);
// Create a cache entry with 5 works.
@@ -897,6 +905,7 @@ protected:
// Clean up any previous state from a call to runQueryFull or runQueryAsCommand.
solns.clear();
+ NamespaceString nss("test.collection");
auto findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setFilter(query);
findCommand->setSort(sort);
@@ -1011,6 +1020,7 @@ protected:
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
+ NamespaceString nss("test.collection");
auto findCommand = std::make_unique<FindCommandRequest>(nss);
findCommand->setFilter(query);
findCommand->setSort(sort);
@@ -1715,7 +1725,7 @@ TEST_F(CachePlanSelectionTest, ContainedOrAndIntersection) {
"]}}}}");
}
-TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
+TEST_F(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
PlanCache planCache(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
auto qs = getQuerySolutionForCaching();
@@ -1789,7 +1799,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithCRUDOperations) {
ASSERT_EQ(planCacheTotalSizeEstimateBytes.get(), originalSize);
}
-TEST(PlanCacheTest, PlanCacheSizeWithEviction) {
+TEST_F(PlanCacheTest, PlanCacheSizeWithEviction) {
const size_t kCacheSize = 5;
PlanCache planCache(kCacheSize);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
@@ -1867,7 +1877,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithEviction) {
}
}
-TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
+TEST_F(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
PlanCache planCache1(5000);
PlanCache planCache2(5000);
unique_ptr<CanonicalQuery> cq(canonicalize("{a: 1, b: 1}"));
@@ -1930,7 +1940,7 @@ TEST(PlanCacheTest, PlanCacheSizeWithMultiplePlanCaches) {
ASSERT_EQ(planCacheTotalSizeEstimateBytes.get(), originalSize);
}
-TEST(PlanCacheTest, PlanCacheMaxSizeParameterCanBeZero) {
+TEST_F(PlanCacheTest, PlanCacheMaxSizeParameterCanBeZero) {
PlanCache planCache{0U};
unique_ptr<CanonicalQuery> query(canonicalize("{a: 1, c: 1}"));
auto qs = getQuerySolutionForCaching();
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt
index 9dde797abee..af896002c31 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt
@@ -1,8 +1,8 @@
-==== VARIATION: cq=ns=testdb.testcollTree: $and
+==== VARIATION: cq=ns=test.collectionTree: $and
a $eq 1
b $eq 1
Sort: {}
Proj: {}
Collation: { locale: "mock_reverse_string" }
-an[eqa,eqb]#mock_reverse_string02300000@f
+an[eqa,eqb]#mock_reverse_string02300000@ff
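Editor's note: the golden-output churn that follows is mechanical. The classic plan cache key used to end in a single flag character after '@'; it now ends in two, and the second character records whether apiStrict was in effect ('f' in all of the pre-existing variations). A hypothetical helper showing the shape of that suffix -- appendDiscriminators is an invented name, and the real logic lives in canonical_query_encoder.cpp:

#include <string>

// Invented illustration only, not the actual encoder code. The first flag is
// the one these fixtures already showed ('t' for the regex variations further
// down, 'f' otherwise); the second is the apiStrict flag added by SERVER-71159.
std::string appendDiscriminators(std::string key, bool firstFlag, bool apiStrict) {
    key += '@';
    key += firstFlag ? 't' : 'f';
    key += apiStrict ? 't' : 'f';
    return key;
}
// appendDiscriminators("an", false, false) == "an@ff"
// appendDiscriminators("an", false, true)  == "an@ft"  (see compute_key_with_api_strict.txt below)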
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt
index 8c2eef252b6..3cb43ca7d0a 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt
@@ -1,80 +1,80 @@
==== VARIATION: query={}, sort={}, proj={}
-an@f
+an@ff
==== VARIATION: query={ $or: [ { a: 1 }, { b: 2 } ] }, sort={}, proj={}
-or[eqa,eqb]@f
+or[eqa,eqb]@ff
==== VARIATION: query={ $or: [ { a: 1 }, { b: 1 }, { c: 1 } ], d: 1 }, sort={}, proj={}
-an[or[eqa,eqb,eqc],eqd]@f
+an[or[eqa,eqb,eqc],eqd]@ff
==== VARIATION: query={ $or: [ { a: 1 }, { b: 1 } ], c: 1, d: 1 }, sort={}, proj={}
-an[or[eqa,eqb],eqc,eqd]@f
+an[or[eqa,eqb],eqc,eqd]@ff
==== VARIATION: query={ a: 1, b: 1, c: 1 }, sort={}, proj={}
-an[eqa,eqb,eqc]@f
+an[eqa,eqb,eqc]@ff
==== VARIATION: query={ a: 1, beqc: 1 }, sort={}, proj={}
-an[eqa,eqbeqc]@f
+an[eqa,eqbeqc]@ff
==== VARIATION: query={ ap1a: 1 }, sort={}, proj={}
-eqap1a@f
+eqap1a@ff
==== VARIATION: query={ aab: 1 }, sort={}, proj={}
-eqaab@f
+eqaab@ff
==== VARIATION: query={}, sort={ a: 1 }, proj={}
-an~aa@f
+an~aa@ff
==== VARIATION: query={}, sort={ a: -1 }, proj={}
-an~da@f
+an~da@ff
==== VARIATION: query={ $text: { $search: "search keywords" } }, sort={ a: { $meta: "textScore" } }, proj={ a: { $meta: "textScore" } }
-te_fts~ta@f
+te_fts~ta@ff
==== VARIATION: query={ a: 1 }, sort={ b: 1 }, proj={}
-eqa~ab@f
+eqa~ab@ff
==== VARIATION: query={}, sort={}, proj={ a: 1 }
-an|_id-a@f
+an|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: -1 }
-an|_id-a@f
+an|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: -1.0 }
-an|_id-a@f
+an|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: true }
-an|_id-a@f
+an|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: 0 }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: false }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: 99 }
-an|_id-a@f
+an|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: "foo" }
-an|_id@f
+an|_id@ff
==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] } }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] }, b: 0 }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] }, b: 1 }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: { $elemMatch: { x: 2 } } }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: { $elemMatch: { x: 2 } }, b: 0 }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: { $elemMatch: { x: 2 } }, b: 1 }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] }, b: { $elemMatch: { x: 2 } } }
-an@f
+an@ff
==== VARIATION: query={}, sort={}, proj={ a: ObjectId('507f191e810c19729de860ea') }
-an|_id@f
+an|_id@ff
==== VARIATION: query={}, sort={}, proj={ _id: 0, a: ObjectId('507f191e810c19729de860ea'), b: "foo" }
-an|@f
+an|@ff
==== VARIATION: query={ a: 1 }, sort={}, proj={ a.$: 1 }
-eqa@f
+eqa@ff
==== VARIATION: query={ a: 1 }, sort={}, proj={ a: 1 }
-eqa|_id-a@f
+eqa|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: 1, b: 1 }
-an|_id-a-b@f
+an|_id-a-b@ff
==== VARIATION: query={}, sort={}, proj={ b: 1, a: 1 }
-an|_id-a-b@f
+an|_id-a-b@ff
==== VARIATION: query={}, sort={}, proj={ b-1: 1, a-2: 1 }
-an|_id-a\-2-b\-1@f
+an|_id-a\-2-b\-1@ff
==== VARIATION: query={}, sort={ x: 1 }, proj={ $sortKey: { $meta: "sortKey" } }
-an~ax@f
+an~ax@ff
==== VARIATION: query={}, sort={}, proj={}
-an@f
+an@ff
==== VARIATION: query={}, sort={ x: 1 }, proj={ a: 1, $sortKey: { $meta: "sortKey" } }
-an~ax|_id-a@f
+an~ax|_id-a@ff
==== VARIATION: query={}, sort={}, proj={ a: 1 }
-an|_id-a@f
+an|_id-a@ff
==== VARIATION: query={ $or: [ { a: 1 } ] }, sort={}, proj={ _id: 0, a: 1 }
-eqa|a@f
+eqa|a@ff
==== VARIATION: query={ $or: [ { a: 1 } ] }, sort={}, proj={ a.$: 1 }
-eqa@f
+eqa@ff
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt
index aef2b2338d3..537b9e366c0 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt
@@ -1,8 +1,8 @@
==== VARIATION: query={ a,[]~|-<>: 1 }, sort={}, proj={}
-eqa\,\[\]\~\|\-<>@f
+eqa\,\[\]\~\|\-<>@ff
==== VARIATION: query={}, sort={ a,[]~|-<>: 1 }, proj={}
-an~aa\,\[\]\~\|\-<>@f
+an~aa\,\[\]\~\|\-<>@ff
==== VARIATION: query={}, sort={}, proj={ a,[]~|-<>: 1 }
-an|_id-a\,\[\]\~\|\-<>@f
+an|_id-a\,\[\]\~\|\-<>@ff
==== VARIATION: query={}, sort={}, proj={ a: "foo,[]~|-<>" }
-an|_id@f
+an|_id@ff
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt
index feee396f80a..bb781dc8584 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt
@@ -1,6 +1,6 @@
==== VARIATION: query={ a: { $near: [ 0, 0 ], $maxDistance: 0.3 } }, sort={}, proj={}
-gnanrfl@f
+gnanrfl@ff
==== VARIATION: query={ a: { $nearSphere: [ 0, 0 ], $maxDistance: 0.31 } }, sort={}, proj={}
-gnanssp@f
+gnanssp@ff
==== VARIATION: query={ a: { $geoNear: { $geometry: { type: "Point", coordinates: [ 0, 0 ] }, $maxDistance: 100 } } }, sort={}, proj={}
-gnanrsp@f
+gnanrsp@ff
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt
index 8d38dccb417..720c1b33d05 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt
@@ -1,30 +1,30 @@
==== VARIATION: query={ a: { $in: [ /foo/ ] } }, sort={}, proj={}
-rea@f
+rea@ff
==== VARIATION: query={ a: { $in: [ /foo/i ] } }, sort={}, proj={}
-rea/i/@f
+rea/i/@ff
==== VARIATION: query={ a: { $in: [ 1, "foo" ] } }, sort={}, proj={}
-ina@f
+ina@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/ ] } }, sort={}, proj={}
-ina_re@f
+ina_re@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/is ] } }, sort={}, proj={}
-ina_re/is/@f
+ina_re/is/@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/si ] } }, sort={}, proj={}
-ina_re/is/@f
+ina_re/is/@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/i, /bar/m, /baz/s ] } }, sort={}, proj={}
-ina_re/ims/@f
+ina_re/ims/@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/i, /bar/m, /baz/s, /qux/i, /quux/s ] } }, sort={}, proj={}
-ina_re/ims/@f
+ina_re/ims/@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/ism, /bar/msi, /baz/im, /qux/si, /quux/im ] } }, sort={}, proj={}
-ina_re/ims/@f
+ina_re/ims/@ff
==== VARIATION: query={ a: { $in: [ 1, /foo/msi, /bar/ism, /baz/is, /qux/mi, /quux/im ] } }, sort={}, proj={}
-ina_re/ims/@f
+ina_re/ims/@ff
==== VARIATION: query={ a: { $not: { $in: [ 1, "foo" ] } } }, sort={}, proj={}
-nt[ina]@f
+nt[ina]@ff
==== VARIATION: query={ a: { $not: { $in: [ 1, /foo/ ] } } }, sort={}, proj={}
-nt[ina_re]@f
+nt[ina_re]@ff
==== VARIATION: query={ a: { $not: { $in: [ 1, /foo/i, /bar/i, /baz/msi ] } } }, sort={}, proj={}
-nt[ina_re/ims/]@f
+nt[ina_re/ims/]@ff
==== VARIATION: query={ a: { $not: { $in: [ /foo/ ] } } }, sort={}, proj={}
-nt[rea]@f
+nt[rea]@ff
==== VARIATION: query={ a: { $not: { $in: [ /foo/i ] } } }, sort={}, proj={}
-nt[rea/i/]@f
+nt[rea/i/]@ff
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt
index a63619b0c61..987e2708e2b 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt
@@ -1,24 +1,24 @@
==== VARIATION: query={ a: /sometext/ }, sort={}, proj={}
-rea@t
+rea@tf
==== VARIATION: query={ a: /sometext/ }, sort={}, proj={}
-rea@t
+rea@tf
==== VARIATION: query={ a: /sometext/s }, sort={}, proj={}
-rea/s/@t
+rea/s/@tf
==== VARIATION: query={ a: /sometext/ms }, sort={}, proj={}
-rea/ms/@t
+rea/ms/@tf
==== VARIATION: query={ a: /sometext/im }, sort={}, proj={}
-rea/im/@t
+rea/im/@tf
==== VARIATION: query={ a: /sometext/mi }, sort={}, proj={}
-rea/im/@t
+rea/im/@tf
==== VARIATION: query={ a: /abc/mi }, sort={}, proj={}
-rea/im/@t
+rea/im/@tf
==== VARIATION: query={ a: /efg/mi }, sort={}, proj={}
-rea/im/@t
+rea/im/@tf
==== VARIATION: query={ a: //ms }, sort={}, proj={}
-rea/ms/@t
+rea/ms/@tf
==== VARIATION: query={ a: /___/ms }, sort={}, proj={}
-rea/ms/@t
+rea/ms/@tf
==== VARIATION: query={ a: { $regex: "abc", $options: "imxsu" } }, sort={}, proj={}
-rea/imsx/@t
+rea/imsx/@tf
==== VARIATION: query={ a: /abc/im }, sort={}, proj={}
-rea/im/@t
+rea/im/@tf
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt
index ef455815c92..ead917e1225 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e.txt
@@ -1,50 +1,50 @@
==== VARIATION: sbe, query={}, sort={}, proj={}
-YW4ABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+YW4ABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={$or: [{a: 1}, {b: 2}]}, sort={}, proj={}
-b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+b3IAW2VxAGE/AAAAACxlcQBiPwEAAABdBQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={b: 1}, sort={}, proj={}
-ZXEAYj8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYj8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1, b: 1, c: 1}, sort={}, proj={}
-YW4AW2VxAGE/AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+YW4AW2VxAGE/AAAAACxlcQBiPwEAAAAsZXEAYz8CAAAAXQUAAAAAAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={a: 1}, proj={}
-YW4ABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+YW4ABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={}, sort={a: -1}, proj={}
-YW4ABQAAAAB+ZGEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+YW4ABQAAAAB+ZGEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={a: 1}
-ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+ZXEAYT8AAAAADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={a: 1}, proj={a: 1}
-YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={a: 1}, proj={a: 1}
-YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+YW4ADAAAABBhAAEAAAAAfmFhAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={}, proj={a: 1}
-YW4ADAAAABBhAAEAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+YW4ADAAAABBhAAEAAAAAAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={}, proj={a: true}
-YW4ACQAAAAhhAAEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+YW4ACQAAAAhhAAEAAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={}, proj={a: false}
-YW4ACQAAAAhhAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+YW4ACQAAAAhhAAAAAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={}, sort={}, proj={}, isCountLike=true
-YW4ABQAAAAAAAQAAAAAAAAAAbm5ubgUAAAAAZl4=
+YW4ABQAAAAAAAQAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=1, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAdG5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAHRubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAZm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAGZubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=1, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbnRubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG50bm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5mbgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5uZm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAoAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+ZXEAYT8AAAAABQAAAAB+YWEAAAAKAAAAAAAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAKAAAAAAAAAG5ubm4FAAAAAGZe
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAACgAAAAAAAABubm5uBQAAAABmXg==
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=1
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm50bhgAAAASJHJlY29yZElkAAEAAAAAAAAAAGZe
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5udG4YAAAAEiRyZWNvcmRJZAABAAAAAAAAAABmXg==
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt
index 474cca1871e..fde58d486b6 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_pipeline.txt
@@ -1,12 +1,12 @@
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl5aAAAAAyRsb29rdXAATAAAAAJmcm9tAAwAAABmb3JlaWduY29sbAACYXMAAwAAAGFzAAJsb2NhbEZpZWxkAAIAAABhAAJmb3JlaWduRmllbGQAAgAAAGIAAAA=
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWgAAAAMkbG9va3VwAEwAAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAA
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl5bAAAAAyRsb29rdXAATQAAAAJmcm9tAAwAAABmb3JlaWduY29sbAACYXMAAwAAAGFzAAJsb2NhbEZpZWxkAAMAAABhMQACZm9yZWlnbkZpZWxkAAIAAABiAAAA
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWwAAAAMkbG9va3VwAE0AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAADAAAAYTEAAmZvcmVpZ25GaWVsZAACAAAAYgAAAA==
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl5bAAAAAyRsb29rdXAATQAAAAJmcm9tAAwAAABmb3JlaWduY29sbAACYXMAAwAAAGFzAAJsb2NhbEZpZWxkAAIAAABhAAJmb3JlaWduRmllbGQAAwAAAGIxAAAA
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWwAAAAMkbG9va3VwAE0AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAMAAABiMQAAAA==
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl5bAAAAAyRsb29rdXAATQAAAAJmcm9tAAwAAABmb3JlaWduY29sbAACYXMABAAAAGFzMQACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAA
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWwAAAAMkbG9va3VwAE0AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAQAAABhczEAAmxvY2FsRmllbGQAAgAAAGEAAmZvcmVpZ25GaWVsZAACAAAAYgAAAA==
==== VARIATION: sbe, query={a: 1}, sort={}, proj={}
-ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAbm5ubgUAAAAAZl5aAAAAAyRsb29rdXAATAAAAAJmcm9tAAwAAABmb3JlaWduY29sbAACYXMAAwAAAGFzAAJsb2NhbEZpZWxkAAIAAABhAAJmb3JlaWduRmllbGQAAgAAAGIAAABdAAAAAyRsb29rdXAATwAAAAJmcm9tAAwAAABmb3JlaWduY29sbAACYXMABAAAAGFzMQACbG9jYWxGaWVsZAADAAAAYTEAAmZvcmVpZ25GaWVsZAADAAAAYjEAAAA=
+ZXEAYT8AAAAABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZeWgAAAAMkbG9va3VwAEwAAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAMAAABhcwACbG9jYWxGaWVsZAACAAAAYQACZm9yZWlnbkZpZWxkAAIAAABiAAAAXQAAAAMkbG9va3VwAE8AAAACZnJvbQAMAAAAZm9yZWlnbmNvbGwAAmFzAAQAAABhczEAAmxvY2FsRmllbGQAAwAAAGExAAJmb3JlaWduRmllbGQAAwAAAGIxAAAA
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt
index bde64417e52..15bf8272a09 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_s_b_e_with_read_concern.txt
@@ -1,6 +1,6 @@
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAZl4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
==== VARIATION: sbe, query={a: 1}, sort={a: 1}, proj={}, allowDiskUse=0, returnKey=0, requestResumeToken=0
-ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAbm5ubgUAAAAAdF4=
+ZXEAYT8AAAAABQAAAAB+YWEAAAAAAAAAAAAAAG5ubm4FAAAAAHRe
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt
new file mode 100644
index 00000000000..b78e050d792
--- /dev/null
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt
@@ -0,0 +1,8 @@
+==== VARIATION: query={}, sort={}, proj={}
+an@ff
+==== VARIATION: query={}, sort={}, proj={}
+an@ft
+==== VARIATION: sbe, query={}, sort={}, proj={}
+YW4ABQAAAAAAAAAAAAAAAAAAAG5ubm4FAAAAAGZe
+==== VARIATION: sbe, query={}, sort={}, proj={}
+YW4ABQAAAAAAAAEAAAAAAAAAAG5ubm4FAAAAAGZe
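Editor's note: this new golden file is the direct check for the ticket -- the same empty query yields distinct classic keys (an@ff vs an@ft) and distinct SBE keys once apiStrict differs, so strict and non-strict plans can no longer share a cache entry. A hedged sketch of the kind of assertion this fixture backs up; the fixture and helper names below are invented stand-ins, and the real test lives in canonical_query_encoder_test.cpp:

// Invented sketch -- encodeKeyForTest is not the encoder's real API.
TEST_F(CanonicalQueryEncoderTestSketch, ApiStrictProducesDistinctKeys) {
    auto keyDefault = encodeKeyForTest(fromjson("{}"), /*apiStrict=*/false);
    auto keyStrict = encodeKeyForTest(fromjson("{}"), /*apiStrict=*/true);
    ASSERT_NE(keyDefault, keyStrict);  // "an@ff" vs "an@ft" in the classic encoding above
}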
diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt
index 1a1209f3928..5c43534264e 100644
--- a/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt
+++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt
@@ -1,10 +1,10 @@
==== VARIATION: query={ a: { $not: { $eq: null } } }, sort={}, proj={ _id: 0, a: 1 }
-ntnot_eq_null[eqa]|a@f
+ntnot_eq_null[eqa]|a@ff
==== VARIATION: query={ a: { $not: { $eq: null } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 }
-ntnot_eq_null[eqa]~aa|a@f
+ntnot_eq_null[eqa]~aa|a@ff
==== VARIATION: query={ a: { $not: { $gte: null } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 }
-ntnot_eq_null[gea]~aa|a@f
+ntnot_eq_null[gea]~aa|a@ff
==== VARIATION: query={ a: { $not: { $lte: null } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 }
-ntnot_eq_null[lea]~aa|a@f
+ntnot_eq_null[lea]~aa|a@ff
==== VARIATION: query={ a: { $not: { $eq: true } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 }
-nt[eqa]~aa|a@f
+nt[eqa]~aa|a@ff