author     Ian Boros <ian.boros@10gen.com>  2018-11-06 13:12:14 -0500
committer  Ian Boros <ian.boros@10gen.com>  2018-11-06 13:12:14 -0500
commit     84df94224aa86213250fa7e5e8d3a1ca6a71ac1b (patch)
tree       b9539360704181b98492c5e28a7b372a18c97212 /jstests
parent     c0f43e12c3cb6382421b9d08d12adcae12dfcbe5 (diff)
download   mongo-84df94224aa86213250fa7e5e8d3a1ca6a71ac1b.tar.gz
Revert "SERVER-33303 Add stable plan cache key and use for index filters"
This reverts commit 36d4668e854d8bd17e8b684dbbf42b3b0903bbe7.
Diffstat (limited to 'jstests')
-rw-r--r--  jstests/core/index_filter_catalog_independent.js      89
-rw-r--r--  jstests/core/index_filter_collation.js                81
-rw-r--r--  jstests/core/plan_cache_list_plans.js                  6
-rw-r--r--  jstests/core/profile_query_hash.js                    62
-rw-r--r--  jstests/core/query_hash_stability.js                  56
-rw-r--r--  jstests/core/wildcard_index_cached_plans.js           21
-rw-r--r--  jstests/noPassthrough/log_and_profile_query_hash.js   42
-rw-r--r--  jstests/noPassthrough/plan_cache_stats_agg_source.js   4
8 files changed, 52 insertions(+), 309 deletions(-)
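The reverted change had introduced a 'planCacheKey' (sensitive to catalog changes such as index creation or removal) alongside the existing 'queryHash' (stable across catalog changes) in explain and profiler output; the tests below are removed or trimmed back to checking 'queryHash' only. The following is a minimal sketch, not part of this commit, of the kind of check the surviving tests keep making. It assumes a non-sharded deployment and uses a hypothetical collection name.

(function() {
    "use strict";

    // Hypothetical collection used only for this sketch.
    const coll = db.query_hash_demo;
    coll.drop();
    assert.commandWorked(coll.insert({x: 1}));

    const before = coll.find({x: 1}).explain("queryPlanner");

    // Adding an index changes the candidate plans for the query...
    assert.commandWorked(coll.createIndex({x: 1}));
    const after = coll.find({x: 1}).explain("queryPlanner");

    // ...but the query shape, and therefore 'queryHash', stays the same.
    assert.eq(before.queryPlanner.queryHash, after.queryPlanner.queryHash);
})();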
diff --git a/jstests/core/index_filter_catalog_independent.js b/jstests/core/index_filter_catalog_independent.js
deleted file mode 100644
index f3ea81a6627..00000000000
--- a/jstests/core/index_filter_catalog_independent.js
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Test that index filters are applied regardless of catalog changes. Intended to reproduce
- * SERVER-33303.
- *
- * @tags: [
- * # This test performs queries with index filters set up. Since index filters are local to a
- * # mongod, and do not replicate, this test must issue all of its commands against the same
- * # node.
- * assumes_read_preference_unchanged,
- * does_not_support_stepdowns,
- * ]
- */
-(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-
- const collName = "index_filter_catalog_independent";
- const coll = db[collName];
- coll.drop();
-
- /*
- * Check that there's one index filter on the given query which allows only 'indexes'.
- */
- function assertOneIndexFilter(query, indexes) {
- let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
- assert.eq(res.filters.length, 1);
- assert.eq(res.filters[0].query, query);
- assert.eq(res.filters[0].indexes, indexes);
- }
-
- function assertIsIxScanOnIndex(winningPlan, keyPattern) {
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0);
- ixScans.every((ixScan) => assert.eq(ixScan.keyPattern, keyPattern));
-
- const collScans = getPlanStages(winningPlan, "COLLSCAN");
- assert.eq(collScans.length, 0);
- }
-
- function checkIndexFilterSet(explain, shouldBeSet) {
- if (explain.queryPlanner.winningPlan.shards) {
- for (let shard of explain.queryPlanner.winningPlan.shards) {
- assert.eq(shard.indexFilterSet, shouldBeSet);
- }
- } else {
- assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet);
- }
- }
-
- assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
- assert.commandWorked(
- db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
-
- let explain = assert.commandWorked(coll.find({x: 3}).explain());
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
-
- // Drop an index. The filter should not change.
- assert.commandWorked(coll.dropIndex({x: 1, y: 1}));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
-
- // The {x: 1} index _could_ be used, but should not be considered because of the filter.
- // Since we dropped the {x: 1, y: 1} index, a COLLSCAN must be used.
- explain = coll.find({x: 3}).explain();
- checkIndexFilterSet(explain, true);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- // Create another index. This should not change whether the index filter is applied.
- assert.commandWorked(coll.createIndex({x: 1, z: 1}));
- explain = assert.commandWorked(coll.find({x: 3}).explain());
- checkIndexFilterSet(explain, true);
- assert(isCollscan(db, explain.queryPlanner.winningPlan));
-
- // Changing the catalog and then setting an index filter should not result in duplicate entries.
- assert.commandWorked(coll.createIndex({x: 1, a: 1}));
- assert.commandWorked(
- db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
-
- // Recreate the {x: 1, y: 1} index and be sure that it's still used.
- assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
- assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]);
-
- explain = assert.commandWorked(coll.find({x: 3}).explain());
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
-})();
diff --git a/jstests/core/index_filter_collation.js b/jstests/core/index_filter_collation.js
deleted file mode 100644
index 56e9c3a3132..00000000000
--- a/jstests/core/index_filter_collation.js
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Test that index filters are applied with the correct collation.
- * @tags: [
- * # This test attempts to perform queries with plan cache filters set up. The former operation
- * # may be routed to a secondary in the replica set, whereas the latter must be routed to the
- * # primary.
- * assumes_read_preference_unchanged,
- * does_not_support_stepdowns,
- * ]
- */
-(function() {
- "use strict";
-
- load("jstests/libs/analyze_plan.js"); // For getPlanStages.
-
- const collName = "index_filter_collation";
- const coll = db[collName];
-
- const caseInsensitive = {locale: "fr", strength: 2};
- coll.drop();
- assert.commandWorked(db.createCollection(collName, {collation: caseInsensitive}));
-
- function checkIndexFilterSet(explain, shouldBeSet) {
- if (explain.queryPlanner.winningPlan.shards) {
- for (let shard of explain.queryPlanner.winningPlan.shards) {
- assert.eq(shard.indexFilterSet, shouldBeSet);
- }
- } else {
- assert.eq(explain.queryPlanner.indexFilterSet, shouldBeSet);
- }
- }
-
- // Now create an index filter on a query with no collation specified.
- assert.commandWorked(coll.createIndexes([{x: 1}, {x: 1, y: 1}]));
- assert.commandWorked(
- db.runCommand({planCacheSetFilter: collName, query: {"x": 3}, indexes: [{x: 1, y: 1}]}));
-
- const listFilters = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
- assert.eq(listFilters.filters.length, 1);
- assert.eq(listFilters.filters[0].query, {x: 3});
- assert.eq(listFilters.filters[0].indexes, [{x: 1, y: 1}]);
-
- // Create an index filter on a query with the default collation specified.
- assert.commandWorked(db.runCommand({
- planCacheSetFilter: collName,
- query: {"x": 3},
- collation: caseInsensitive,
- indexes: [{x: 1}]
- }));
-
- // Although these two queries would run with the same collation, they have different "shapes"
- // so we expect there to be two index filters present.
- let res = assert.commandWorked(db.runCommand({planCacheListFilters: collName}));
- assert.eq(res.filters.length, 2);
-
- // One of the filters should only be applied to queries with the "fr" collation
- // and use the {x: 1} index.
- assert(res.filters.some((filter) => filter.hasOwnProperty("collation") &&
- filter.collation.locale === "fr" &&
- friendlyEqual(filter.indexes, [{x: 1}])));
-
- // The other should not have any collation, and allow the index {x: 1, y: 1}.
- assert(res.filters.some((filter) => !filter.hasOwnProperty("collation") &&
- friendlyEqual(filter.indexes, [{x: 1, y: 1}])));
-
- function assertIsIxScanOnIndex(winningPlan, keyPattern) {
- const ixScans = getPlanStages(winningPlan, "IXSCAN");
- assert.gt(ixScans.length, 0);
- assert.eq(ixScans[0].keyPattern, keyPattern);
- }
-
- // Run the queries and be sure the correct indexes are used.
- let explain = coll.find({x: 3}).explain();
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1, y: 1});
-
- // Run the queries and be sure the correct indexes are used.
- explain = coll.find({x: 3}).collation(caseInsensitive).explain();
- checkIndexFilterSet(explain, true);
- assertIsIxScanOnIndex(explain.queryPlanner.winningPlan, {x: 1});
-})();
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index 11c7922b4b1..fa36034446d 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -74,16 +74,12 @@
print('plan ' + i + ': ' + tojson(plans[i]));
}
- // Test the queryHash and planCacheKey property by comparing entries for two different
- // query shapes.
+ // Test the queryHash property by comparing entries for two different query shapes.
assert.eq(0, t.find({a: 132}).sort({b: -1, a: 1}).itcount(), 'unexpected document count');
let entryNewShape = getPlansForCacheEntry({a: 123}, {b: -1, a: 1}, {});
assert.eq(entry.hasOwnProperty("queryHash"), true);
assert.eq(entryNewShape.hasOwnProperty("queryHash"), true);
assert.neq(entry["queryHash"], entryNewShape["queryHash"]);
- assert.eq(entry.hasOwnProperty("planCacheKey"), true);
- assert.eq(entryNewShape.hasOwnProperty("planCacheKey"), true);
- assert.neq(entry["planCacheKey"], entryNewShape["planCacheKey"]);
//
// Tests for plan reason and feedback in planCacheListPlans
diff --git a/jstests/core/profile_query_hash.js b/jstests/core/profile_query_hash.js
index 4c7b3e23ab7..9707cbe8b3d 100644
--- a/jstests/core/profile_query_hash.js
+++ b/jstests/core/profile_query_hash.js
@@ -39,7 +39,7 @@
'unexpected document count');
const profileObj0 =
getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query0 find command"});
- assert(profileObj0.hasOwnProperty("planCacheKey"), tojson(profileObj0));
+ assert(profileObj0.hasOwnProperty("queryHash"), tojson(profileObj0));
let shapes = getShapes(coll);
assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
@@ -50,30 +50,26 @@
'unexpected document count');
const profileObj1 =
getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query1 find command"});
- assert(profileObj1.hasOwnProperty("planCacheKey"), tojson(profileObj1));
+ assert(profileObj1.hasOwnProperty("queryHash"), tojson(profileObj1));
// Since the query shapes are the same, we only expect there to be one query shape present in
// the plan cache commands output.
shapes = getShapes(coll);
assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
- assert.eq(
- profileObj0.planCacheKey, profileObj1.planCacheKey, 'unexpected not matching query hashes');
-
- // Test that the planCacheKey is the same in explain output for query0 and query1 as it was
- // in system.profile output.
- const explainQuery0 = assert.commandWorked(coll.find({a: 1, b: 1}, {a: 1})
- .sort({a: -1})
- .comment("Query0 find command")
- .explain("queryPlanner"));
- assert.eq(explainQuery0.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery0);
- const explainQuery1 = assert.commandWorked(coll.find({a: 2, b: 1}, {a: 1})
- .sort({a: -1})
- .comment("Query1 find command")
- .explain("queryPlanner"));
- assert.eq(explainQuery1.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery1);
-
- // Check that the 'planCacheKey' is the same for both query 0 and query 1.
- assert.eq(explainQuery0.queryPlanner.planCacheKey, explainQuery1.queryPlanner.planCacheKey);
+ assert.eq(profileObj0.queryHash, profileObj1.queryHash, 'unexpected not matching query hashes');
+
+ // Test that the queryHash is the same in explain output for query0 and query1 as it was in
+ // system.profile output.
+ let explain = assert.commandWorked(coll.find({a: 1, b: 1}, {a: 1})
+ .sort({a: -1})
+ .comment("Query0 find command")
+ .explain("queryPlanner"));
+ assert.eq(explain.queryPlanner.queryHash, profileObj0.queryHash, () => tojson(explain));
+ explain = assert.commandWorked(coll.find({a: 2, b: 1}, {a: 1})
+ .sort({a: -1})
+ .comment("Query1 find command")
+ .explain("queryPlanner"));
+ assert.eq(explain.queryPlanner.queryHash, profileObj0.queryHash, () => tojson(explain));
// Executes query2 and gets the corresponding system.profile entry.
assert.eq(0,
@@ -81,31 +77,19 @@
'unexpected document count');
const profileObj2 =
getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query2 find command"});
- assert(profileObj2.hasOwnProperty("planCacheKey"), tojson(profileObj2));
+ assert(profileObj2.hasOwnProperty("queryHash"), tojson(profileObj2));
// Query0 and query1 should both have the same query hash for the given indexes. Whereas, query2
// should have a unique hash. Asserts that a total of two distinct hashes results in two query
// shapes.
shapes = getShapes(coll);
assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
- assert.neq(
- profileObj0.planCacheKey, profileObj2.planCacheKey, 'unexpected matching query hashes');
+ assert.neq(profileObj0.queryHash, profileObj2.queryHash, 'unexpected matching query hashes');
- // The planCacheKey in explain should be different for query2 than the hash from query0 and
- // query1.
- const explainQuery2 = assert.commandWorked(
- coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner"));
- assert(explainQuery2.queryPlanner.hasOwnProperty("planCacheKey"));
- assert.neq(explainQuery2.queryPlanner.planCacheKey, profileObj0.planCacheKey, explainQuery2);
- assert.eq(explainQuery2.queryPlanner.planCacheKey, profileObj2.planCacheKey, explainQuery2);
-
- // Now drop an index. This should change the 'planCacheKey' value for queries, but not the
- // 'queryHash'.
- assert.commandWorked(coll.dropIndex({a: 1}));
- const explainQuery2PostCatalogChange = assert.commandWorked(
+ // The queryHash in explain should be different for query2 than the hash from query0 and query1.
+ explain = assert.commandWorked(
coll.find({a: 12000, b: 1}).comment("Query2 find command").explain("queryPlanner"));
- assert.eq(explainQuery2.queryPlanner.queryHash,
- explainQuery2PostCatalogChange.queryPlanner.queryHash);
- assert.neq(explainQuery2.queryPlanner.planCacheKey,
- explainQuery2PostCatalogChange.queryPlanner.planCacheKey);
+ assert(explain.queryPlanner.hasOwnProperty("queryHash"));
+ assert.neq(explain.queryPlanner.queryHash, profileObj0.queryHash, () => tojson(explain));
+ assert.eq(explain.queryPlanner.queryHash, profileObj2.queryHash, () => tojson(explain));
})();
diff --git a/jstests/core/query_hash_stability.js b/jstests/core/query_hash_stability.js
deleted file mode 100644
index 14ae20fdb98..00000000000
--- a/jstests/core/query_hash_stability.js
+++ /dev/null
@@ -1,56 +0,0 @@
-/**
- * Test that 'queryHash' and 'planCacheKey' from explain() output have sensible values
- * across catalog changes.
- */
-(function() {
- "use strict";
- load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers.isMongos().
-
- const collName = "query_hash_stability";
- const coll = db[collName];
- coll.drop();
- // Be sure the collection exists.
- assert.commandWorked(coll.insert({x: 5}));
-
- function getPlanCacheKeyFromExplain(explainRes) {
- const hash = FixtureHelpers.isMongos(db)
- ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
- : explainRes.queryPlanner.planCacheKey;
- assert.eq(typeof(hash), "string");
- return hash;
- }
-
- function getQueryHashFromExplain(explainRes) {
- const hash = FixtureHelpers.isMongos(db)
- ? explainRes.queryPlanner.winningPlan.shards[0].queryHash
- : explainRes.queryPlanner.queryHash;
- assert.eq(typeof(hash), "string");
- return hash;
- }
-
- const query = {x: 3};
-
- const initialExplain = coll.find(query).explain();
-
- // Add a sparse index.
- assert.commandWorked(coll.createIndex({x: 1}, {sparse: true}));
-
- const withIndexExplain = coll.find(query).explain();
-
- // 'queryHash' shouldn't change across catalog changes.
- assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(withIndexExplain));
- // We added an index so the plan cache key changed.
- assert.neq(getPlanCacheKeyFromExplain(initialExplain),
- getPlanCacheKeyFromExplain(withIndexExplain));
-
- // Drop the index.
- assert.commandWorked(coll.dropIndex({x: 1}));
- const postDropExplain = coll.find(query).explain();
-
- // 'queryHash' shouldn't change across catalog changes.
- assert.eq(getQueryHashFromExplain(initialExplain), getQueryHashFromExplain(postDropExplain));
-
- // The 'planCacheKey' should be the same as what it was before we dropped the index.
- assert.eq(getPlanCacheKeyFromExplain(initialExplain),
- getPlanCacheKeyFromExplain(postDropExplain));
-})();
diff --git a/jstests/core/wildcard_index_cached_plans.js b/jstests/core/wildcard_index_cached_plans.js
index d0c2a50abe9..f1394273f13 100644
--- a/jstests/core/wildcard_index_cached_plans.js
+++ b/jstests/core/wildcard_index_cached_plans.js
@@ -44,17 +44,16 @@
return null;
}
- function getPlanCacheKeyFromExplain(explainRes) {
+ function getQueryHashFromExplain(explainRes) {
const hash = FixtureHelpers.isMongos(db)
- ? explainRes.queryPlanner.winningPlan.shards[0].planCacheKey
- : explainRes.queryPlanner.planCacheKey;
+ ? explainRes.queryPlanner.winningPlan.shards[0].queryHash
+ : explainRes.queryPlanner.queryHash;
assert.eq(typeof(hash), "string");
return hash;
}
- function getPlanCacheKey(query) {
- return getPlanCacheKeyFromExplain(
- assert.commandWorked(coll.explain().find(query).finish()));
+ function getQueryHash(query) {
+ return getQueryHashFromExplain(assert.commandWorked(coll.explain().find(query).finish()));
}
const query = {a: 1, b: 1};
@@ -89,7 +88,7 @@
for (let i = 0; i < 2; i++) {
assert.eq(coll.find({a: 1, b: null}).itcount(), 1000);
}
- assert.neq(getPlanCacheKey(queryWithBNull), getPlanCacheKey(query));
+ assert.neq(getQueryHash(queryWithBNull), getQueryHash(query));
// There should only have been one solution for the above query, so it would not get cached.
assert.eq(getCacheEntryForQuery({a: 1, b: null}), null);
@@ -118,8 +117,8 @@
// Check that the shapes are different since the query which matches on a string will not
// be eligible to use the b.$** index (since the index has a different collation).
- assert.neq(getPlanCacheKeyFromExplain(queryWithoutStringExplain),
- getPlanCacheKeyFromExplain(queryWithStringExplain));
+ assert.neq(getQueryHashFromExplain(queryWithoutStringExplain),
+ getQueryHashFromExplain(queryWithStringExplain));
})();
// Check that indexability discriminators work with partial wildcard indexes.
@@ -141,7 +140,7 @@
// Check that the shapes are different since the query which searches for a value not
// included by the partial filter expression won't be eligible to use the $** index.
- assert.neq(getPlanCacheKeyFromExplain(queryIndexedExplain),
- getPlanCacheKeyFromExplain(queryUnindexedExplain));
+ assert.neq(getQueryHashFromExplain(queryIndexedExplain),
+ getQueryHashFromExplain(queryUnindexedExplain));
})();
})();
diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js
index 2a0757689a6..8c9db4e7102 100644
--- a/jstests/noPassthrough/log_and_profile_query_hash.js
+++ b/jstests/noPassthrough/log_and_profile_query_hash.js
@@ -4,7 +4,7 @@
(function() {
"use strict";
- // For getLatestProfilerEntry().
+ // For getLatestProfilerEntry
load("jstests/libs/profiler.js");
// Prevent the mongo shell from gossiping its cluster time, since this will increase the amount
@@ -50,8 +50,8 @@
}
// Run the find command, retrieve the corresponding profile object and log line, then ensure
- // that both the profile object and log line have matching stable query hashes (if any).
- function runTestsAndGetHashes(db, {comment, test, hasQueryHash}) {
+ // that both the profile object and log line have matching query hashes (if any).
+ function runTestsAndGetHash(db, {comment, test, hasQueryHash}) {
assert.commandWorked(db.adminCommand({clearLog: "global"}));
assert.doesNotThrow(() => test(db, comment));
const log = assert.commandWorked(db.adminCommand({getLog: "global"})).log;
@@ -61,19 +61,13 @@
const logLine = retrieveLogLine(log, profileEntry);
assert.neq(logLine, null);
- // Confirm that the query hashes either exist or don't exist in both log and profile
- // entries. If the queryHash and planCacheKey exist, ensure that the hashes from the
- // profile entry match the log line.
+ // Confirm that the query hash either exists or does not exist in both log and profile
+ // entries. If the queryHash exists, ensure that the hash from the profile entry
+ // exists within the log line.
assert.eq(hasQueryHash, profileEntry.hasOwnProperty("queryHash"));
- assert.eq(hasQueryHash, profileEntry.hasOwnProperty("planCacheKey"));
assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["queryHash"]) >= 0));
- assert.eq(hasQueryHash, (logLine.indexOf(profileEntry["planCacheKey"]) >= 0));
- if (hasQueryHash) {
- return {
- queryHash: profileEntry["queryHash"],
- planCacheKey: profileEntry["planCacheKey"]
- };
- }
+ if (hasQueryHash)
+ return profileEntry["queryHash"];
return null;
}
@@ -118,14 +112,14 @@
}
];
- const hashValues = testList.map((testCase) => runTestsAndGetHashes(testDB, testCase));
+ const hashValues = testList.map((testCase) => runTestsAndGetHash(testDB, testCase));
- // Confirm that the same shape of query has the same hashes.
+ // Confirm that the same shape of query has the same queryHash.
assert.neq(hashValues[0], hashValues[1]);
assert.eq(hashValues[1], hashValues[2]);
- // Test that the expected 'planCacheKey' and 'queryHash' are included in the transitional
- // log lines when an inactive cache entry is created.
+ // Test that the expected queryHash is included in the transitional log lines when an inactive
+ // cache entry is created.
assert.commandWorked(testDB.setLogLevel(1, "query"));
const testInactiveCreationLog = {
comment: "Test Creating inactive entry.",
@@ -140,16 +134,14 @@
hasQueryHash: true
};
- const onCreationHashes = runTestsAndGetHashes(testDB, testInactiveCreationLog);
+ const onCreationHash = runTestsAndGetHash(testDB, testInactiveCreationLog);
const log = assert.commandWorked(testDB.adminCommand({getLog: "global"})).log;
- // Fetch the line that logs when an inactive cache entry is created for the query with
- // 'planCacheKey' and 'queryHash'. Confirm only one line does this.
+ // Fetch the line that logs when an inactive cache entry is created for the query with queryHash
+ // onCreationHash. Confirm only one line does this.
const creationLogList = log.filter(
- logLine =>
- (logLine.indexOf("Creating inactive cache entry for query shape query") != -1 &&
- logLine.indexOf("planCacheKey " + String(onCreationHashes.planCacheKey)) != -1 &&
- logLine.indexOf("queryHash " + String(onCreationHashes.queryHash)) != -1));
+ logLine => (logLine.indexOf("Creating inactive cache entry for query shape query") != -1 &&
+ logLine.indexOf(String(onCreationHash)) != -1));
assert.eq(1, creationLogList.length);
MongoRunner.stopMongod(conn);
diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js
index cee1aa15907..cd5cde5e903 100644
--- a/jstests/noPassthrough/plan_cache_stats_agg_source.js
+++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js
@@ -91,11 +91,9 @@
assert.eq(entryStats.createdFromQuery.projection, {});
assert(!entryStats.createdFromQuery.hasOwnProperty("collation"));
- // Verify that $planCacheStats reports the same 'queryHash' and 'planCacheKey' as explain
- // for this query shape.
+ // Verify that $planCacheStats reports the same 'queryHash' as explain for this query shape.
explain = assert.commandWorked(coll.find({a: 1, b: 1}).explain());
assert.eq(entryStats.queryHash, explain.queryPlanner.queryHash);
- assert.eq(entryStats.planCacheKey, explain.queryPlanner.planCacheKey);
// Since the query shape was only run once, the plan cache entry should not be active.
assert.eq(entryStats.isActive, false);