summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDavid Storch <david.storch@mongodb.com>2019-11-26 14:29:44 +0000
committerevergreen <evergreen@mongodb.com>2019-11-26 14:29:44 +0000
commit36ff260fe666489b8e8021882ee1ec3315526c92 (patch)
tree7de6c2a0da9c3766c56a1a7f4a7dadf141df9203
parent03f3b000c26146a8194bebb6124623ac0ebf20ec (diff)
downloadmongo-36ff260fe666489b8e8021882ee1ec3315526c92.tar.gz
SERVER-44701 Remove support for 'planCacheListQueryShapes' and 'planCacheListPlans'.
As an alternative, use the $planCacheStats aggregation stage. This metadata source, when placed first in the pipeline, returns one document per plan cache entry for a particular collection. This data can then be filtered, projected, grouped, or otherwise processed with the full power of MQL.
-rw-r--r--jstests/auth/lib/commands_lib.js25
-rw-r--r--jstests/core/collation_plan_cache.js134
-rw-r--r--jstests/core/commands_namespace_parsing.js12
-rw-r--r--jstests/core/index_filter_commands.js193
-rw-r--r--jstests/core/operation_latency_histogram.js2
-rw-r--r--jstests/core/plan_cache_clear.js116
-rw-r--r--jstests/core/plan_cache_list_plans.js134
-rw-r--r--jstests/core/plan_cache_list_shapes.js80
-rw-r--r--jstests/core/plan_cache_shell_helpers.js197
-rw-r--r--jstests/core/profile_query_hash.js27
-rw-r--r--jstests/core/restart_catalog.js2
-rw-r--r--jstests/core/views/views_all_commands.js8
-rw-r--r--jstests/libs/override_methods/network_error_and_txn_override.js2
-rw-r--r--jstests/noPassthrough/global_operation_latency_histogram.js4
-rw-r--r--jstests/noPassthrough/plan_cache_index_create.js16
-rw-r--r--jstests/noPassthrough/plan_cache_list_failed_plans.js32
-rw-r--r--jstests/noPassthrough/plan_cache_list_plans_new_format.js111
-rw-r--r--jstests/noPassthrough/query_knobs_validation.js1
-rw-r--r--jstests/noPassthroughWithMongod/plan_cache_replanning.js49
-rw-r--r--jstests/replsets/plan_cache_slaveok.js28
-rw-r--r--jstests/sharding/database_versioning_all_commands.js23
-rw-r--r--jstests/sharding/safe_secondary_reads_drop_recreate.js11
-rw-r--r--jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js11
-rw-r--r--jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js11
-rw-r--r--jstests/sharding/track_unsharded_collections_check_shard_version.js15
-rw-r--r--src/mongo/db/commands/SConscript1
-rw-r--r--src/mongo/db/commands/index_filter_commands.cpp4
-rw-r--r--src/mongo/db/commands/plan_cache_clear_command.cpp183
-rw-r--r--src/mongo/db/commands/plan_cache_commands.cpp374
-rw-r--r--src/mongo/db/commands/plan_cache_commands.h170
-rw-r--r--src/mongo/db/commands/plan_cache_commands_test.cpp681
-rw-r--r--src/mongo/db/query/plan_cache.h3
-rw-r--r--src/mongo/db/query/query_knobs.idl67
-rw-r--r--src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp2
-rw-r--r--src/mongo/s/commands/SConscript2
-rw-r--r--src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp (renamed from src/mongo/s/commands/cluster_plan_cache_cmd.cpp)73
-rw-r--r--src/mongo/shell/collection.js33
37 files changed, 818 insertions, 2019 deletions
diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js
index 4f1f1d88899..91eeaefe1da 100644
--- a/jstests/auth/lib/commands_lib.js
+++ b/jstests/auth/lib/commands_lib.js
@@ -4585,31 +4585,6 @@ var authCommandsLib = {
]
},
{
- testname: "planCacheRead",
- command: {planCacheListQueryShapes: "x"},
- skipSharded: true,
- setup: function(db) {
- db.x.save({});
- },
- teardown: function(db) {
- db.x.drop();
- },
- testcases: [
- {
- runOnDb: firstDbName,
- roles: roles_readDbAdmin,
- privileges:
- [{resource: {db: firstDbName, collection: "x"}, actions: ["planCacheRead"]}],
- },
- {
- runOnDb: secondDbName,
- roles: roles_readDbAdminAny,
- privileges:
- [{resource: {db: secondDbName, collection: "x"}, actions: ["planCacheRead"]}],
- },
- ]
- },
- {
testname: "planCacheWrite",
command: {planCacheClear: "x"},
skipSharded: true,
diff --git a/jstests/core/collation_plan_cache.js b/jstests/core/collation_plan_cache.js
index 97d7f220f54..cf7f75627cf 100644
--- a/jstests/core/collation_plan_cache.js
+++ b/jstests/core/collation_plan_cache.js
@@ -5,7 +5,9 @@
// # former operation may be routed to a secondary in the replica set, whereas the latter must be
// # routed to the primary.
// assumes_read_preference_unchanged,
+// assumes_read_concern_unchanged,
// does_not_support_stepdowns,
+// assumes_against_mongod_not_mongos,
// ]
(function() {
'use strict';
@@ -13,6 +15,14 @@
var coll = db.collation_plan_cache;
coll.drop();
+function dumpPlanCacheState() {
+ return coll.aggregate([{$planCacheStats: {}}]).toArray();
+}
+
+function getPlansByQuery(match) {
+ return coll.aggregate([{$planCacheStats: {}}, {$match: match}]).toArray();
+}
+
assert.commandWorked(coll.insert({a: 'foo', b: 5}));
// We need two indexes that each query can use so that a plan cache entry is created.
@@ -23,96 +33,65 @@ assert.commandWorked(coll.createIndex({a: 1, b: 1}, {collation: {locale: 'en_US'
// shape.
assert.commandWorked(coll.createIndex({b: 1}, {collation: {locale: 'fr_CA'}}));
-// listQueryShapes().
-
// Run a query so that an entry is inserted into the cache.
assert.commandWorked(
coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
'find command failed');
// The query shape should have been added.
-var shapes = coll.getPlanCache().listQueryShapes();
+var shapes = coll.aggregate([{$planCacheStats: {}}]).toArray();
assert.eq(1, shapes.length, 'unexpected cache size after running query');
-let filteredShape0 = shapes[0];
-delete filteredShape0.queryHash;
-assert.eq(filteredShape0,
+assert.eq(shapes[0].createdFromQuery.query, {a: 'foo', b: 5}, shapes);
+assert.eq(shapes[0].createdFromQuery.sort, {}, shapes);
+assert.eq(shapes[0].createdFromQuery.projection, {}, shapes);
+assert.eq(shapes[0].createdFromQuery.collation,
{
- query: {a: 'foo', b: 5},
- sort: {},
- projection: {},
- collation: {
- locale: 'en_US',
- caseLevel: false,
- caseFirst: 'off',
- strength: 3,
- numericOrdering: false,
- alternate: 'non-ignorable',
- maxVariable: 'punct',
- normalization: false,
- backwards: false,
- version: '57.1'
- }
+ locale: 'en_US',
+ caseLevel: false,
+ caseFirst: 'off',
+ strength: 3,
+ numericOrdering: false,
+ alternate: 'non-ignorable',
+ maxVariable: 'punct',
+ normalization: false,
+ backwards: false,
+ version: '57.1'
},
- 'unexpected query shape returned from listQueryShapes()');
+ shapes);
coll.getPlanCache().clear();
-// getPlansByQuery().
-
-// Passing a query with an empty collation object should throw.
-assert.throws(function() {
- coll.getPlanCache().getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {}});
-}, [], 'empty collation object should throw');
-
-// Passing a query with an invalid collation object should throw.
-assert.throws(function() {
- coll.getPlanCache().getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {bad: "value"}});
-}, [], 'invalid collation object should throw');
-
// Run a query so that an entry is inserted into the cache.
assert.commandWorked(
- coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
- 'find command failed');
+ coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}));
// The query should have cached plans.
assert.lt(0,
- coll.getPlanCache()
- .getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}})
- .plans.length,
- 'unexpected number of cached plans for query');
-
-// Test passing the query, sort, projection, and collation to getPlansByQuery() as separate
-// arguments.
-assert.lt(
- 0,
- coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}, {}, {}, {locale: 'en_US'}).plans.length,
- 'unexpected number of cached plans for query');
-
-// Test passing the query, sort, projection, and collation to getPlansByQuery() as separate
-// arguments.
-assert.eq(0,
- coll.getPlanCache().getPlansByQuery({a: 'foo', b: 5}).plans.length,
- 'unexpected number of cached plans for query');
+ getPlansByQuery({
+ 'createdFromQuery.query': {a: 'foo', b: 5},
+ 'createdFromQuery.collation.locale': 'en_US'
+ }).length,
+ dumpPlanCacheState());
// A query with a different collation should have no cached plans.
assert.eq(0,
- coll.getPlanCache()
- .getPlansByQuery(
- {query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}})
- .plans.length,
- 'unexpected number of cached plans for query');
+ getPlansByQuery({
+ 'createdFromQuery.query': {a: 'foo', b: 5},
+ 'createdFromQuery.sort': {},
+ 'createdFromQuery.projection': {},
+ 'createdFromQuery.collation.locale': 'fr_CA'
+ }).length,
+ dumpPlanCacheState());
// A query with different string locations should have no cached plans.
-assert.eq(
- 0,
- coll.getPlanCache()
- .getPlansByQuery(
- {query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}})
- .plans.length,
- 'unexpected number of cached plans for query');
+assert.eq(0,
+ getPlansByQuery({
+ 'createdFromQuery.query': {a: 'foo', b: 'bar'},
+ 'createdFromQuery.sort': {},
+ 'createdFromQuery.projection': {},
+ 'createdFromQuery.collation': {locale: 'en_US'}
+ }).length,
+ dumpPlanCacheState());
coll.getPlanCache().clear();
@@ -134,29 +113,26 @@ assert.throws(function() {
assert.commandWorked(
coll.runCommand("find", {filter: {a: 'foo', b: 5}, collation: {locale: "en_US"}}),
'find command failed');
-assert.eq(
- 1, coll.getPlanCache().listQueryShapes().length, 'unexpected cache size after running query');
+assert.eq(1, coll.aggregate([{$planCacheStats: {}}]).itcount(), dumpPlanCacheState());
// Dropping a query shape with a different collation should have no effect.
coll.getPlanCache().clearPlansByQuery(
{query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'fr_CA'}});
-assert.eq(1,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after dropping uncached query shape');
+assert.eq(1, coll.aggregate([{$planCacheStats: {}}]).itcount(), dumpPlanCacheState());
// Dropping a query shape with different string locations should have no effect.
coll.getPlanCache().clearPlansByQuery(
{query: {a: 'foo', b: 'bar'}, sort: {}, projection: {}, collation: {locale: 'en_US'}});
-assert.eq(1,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after dropping uncached query shape');
+assert.eq(1, coll.aggregate([{$planCacheStats: {}}]).itcount(), dumpPlanCacheState());
// Dropping query shape.
coll.getPlanCache().clearPlansByQuery(
{query: {a: 'foo', b: 5}, sort: {}, projection: {}, collation: {locale: 'en_US'}});
-assert.eq(0,
- coll.getPlanCache().listQueryShapes().length,
- 'unexpected cache size after dropping query shapes');
+assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount(), dumpPlanCacheState());
+
+// 'collation' parameter is not allowed with 'query' parameter for 'planCacheClear'.
+assert.commandFailedWithCode(coll.runCommand('planCacheClear', {collation: {locale: "en_US"}}),
+ ErrorCodes.BadValue);
// Index filter commands.
diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js
index 62c435d6801..1eda20c57f2 100644
--- a/jstests/core/commands_namespace_parsing.js
+++ b/jstests/core/commands_namespace_parsing.js
@@ -200,18 +200,6 @@ assertFailsWithInvalidNamespacesForField("planCacheSetFilter",
assertFailsWithInvalidNamespacesForField(
"planCacheClearFilters", {planCacheClearFilters: ""}, isNotFullyQualified, isNotAdminCommand);
-// Test planCacheListQueryShapes fails with an invalid collection name.
-assertFailsWithInvalidNamespacesForField("planCacheListQueryShapes",
- {planCacheListQueryShapes: ""},
- isNotFullyQualified,
- isNotAdminCommand);
-
-// Test planCacheListPlans fails with an invalid collection name.
-assertFailsWithInvalidNamespacesForField("planCacheListPlans",
- {planCacheListPlans: "", query: {}},
- isNotFullyQualified,
- isNotAdminCommand);
-
// Test planCacheClear fails with an invalid collection name.
assertFailsWithInvalidNamespacesForField(
"planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand);
diff --git a/jstests/core/index_filter_commands.js b/jstests/core/index_filter_commands.js
index de110e5abb9..487940340d8 100644
--- a/jstests/core/index_filter_commands.js
+++ b/jstests/core/index_filter_commands.js
@@ -16,7 +16,8 @@
* server of query shape to list of index specs.
*
* Only time we might need to execute a query is to check the plan cache state. We would do this
- * with the planCacheListPlans command on the same query shape with the index filters.
+ * using the $planCacheStats aggregation metadata source on the same query shape with the index
+ * filters.
*
* @tags: [
* # Cannot implicitly shard accessed collections because of collection existing when none
@@ -26,23 +27,26 @@
* # may be routed to a secondary in the replica set, whereas the latter must be routed to the
* # primary.
* assumes_read_preference_unchanged,
+ * assumes_read_concern_unchanged,
* does_not_support_stepdowns,
+ * assumes_against_mongod_not_mongos,
* ]
*/
+(function() {
load("jstests/libs/analyze_plan.js");
-var t = db.jstests_index_filter_commands;
+const coll = db.jstests_index_filter_commands;
-t.drop();
+coll.drop();
// Setup the data so that plans will not tie given the indices and query
// below. Tying plans will not be cached, and we need cached shapes in
// order to test the filter functionality.
-t.save({a: 1});
-t.save({a: 1});
-t.save({a: 1, b: 1});
-t.save({_id: 1});
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({_id: 1}));
// Add 2 indexes.
// 1st index is more efficient.
@@ -50,9 +54,9 @@ t.save({_id: 1});
var indexA1 = {a: 1};
var indexA1B1 = {a: 1, b: 1};
var indexA1C1 = {a: 1, c: 1};
-t.ensureIndex(indexA1);
-t.ensureIndex(indexA1B1);
-t.ensureIndex(indexA1C1);
+assert.commandWorked(coll.createIndex(indexA1));
+assert.commandWorked(coll.createIndex(indexA1B1));
+assert.commandWorked(coll.createIndex(indexA1C1));
var queryAA = {a: "A"};
var queryA1 = {a: 1, b: 1};
@@ -67,32 +71,27 @@ var queryID = {_id: 1};
// Utility function to list index filters.
function getFilters(collection) {
if (collection == undefined) {
- collection = t;
+ collection = coll;
}
var res = collection.runCommand('planCacheListFilters');
- print('planCacheListFilters() = ' + tojson(res));
assert.commandWorked(res, 'planCacheListFilters failed');
assert(res.hasOwnProperty('filters'), 'filters missing from planCacheListFilters result');
return res.filters;
}
-// If query shape is in plan cache,
-// planCacheListPlans returns non-empty array of plans.
-function planCacheContains(shape) {
- var res = t.runCommand('planCacheListPlans', shape);
- assert.commandWorked(res);
- return res.plans.length > 0;
-}
+// Returns the plan cache entry for the given value of 'createdFromQuery', or null if no such plan
+// cache entry exists.
+function planCacheEntryForQuery(createdFromQuery) {
+ const res = coll.getPlanCache().list([{$match: {createdFromQuery: createdFromQuery}}]);
+ if (res.length === 0) {
+ return null;
+ }
-// Utility function to list plans for a query.
-function getPlans(shape) {
- var res = t.runCommand('planCacheListPlans', shape);
- assert.commandWorked(res, 'planCacheListPlans(' + tojson(shape, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'),
- 'plans missing from planCacheListPlans(' + tojson(shape, '', true) + ') result');
- return res.plans;
+ assert.eq(1, res.length, res);
+ return res[0];
}
+// Utility function to list plans for a query.
// Attempting to retrieve index filters on a non-existent collection
// will return empty results.
var missingCollection = db.jstests_index_filter_commands_missing;
@@ -106,13 +105,12 @@ var filters = getFilters();
assert.eq(0, filters.length, 'unexpected number of index filters in planCacheListFilters result');
// Check details of winning plan in plan cache before setting index filter.
-assert.eq(1, t.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
+assert.eq(1, coll.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
var shape = {query: queryA1, sort: sortA1, projection: projectionA1};
-var planBeforeSetFilter = getPlans(shape)[0];
-print('Winning plan (before setting index filters) = ' + tojson(planBeforeSetFilter));
-// Check filterSet field in plan details
-assert.eq(
- false, planBeforeSetFilter.filterSet, 'missing or invalid filterSet field in plan details');
+var planBeforeSetFilter = planCacheEntryForQuery(shape);
+assert.neq(null, planBeforeSetFilter, coll.getPlanCache().list());
+// Check 'indexFilterSet' field in plan details
+assert.eq(false, planBeforeSetFilter.indexFilterSet, planBeforeSetFilter);
// Adding index filters to a non-existent collection should be an error.
assert.commandFailed(missingCollection.runCommand(
@@ -120,7 +118,7 @@ assert.commandFailed(missingCollection.runCommand(
{query: queryA1, sort: sortA1, projection: projectionA1, indexes: [indexA1B1, indexA1C1]}));
// Add index filters for simple query.
-assert.commandWorked(t.runCommand(
+assert.commandWorked(coll.runCommand(
'planCacheSetFilter',
{query: queryA1, sort: sortA1, projection: projectionA1, indexes: [indexA1B1, indexA1C1]}));
filters = getFilters();
@@ -134,24 +132,24 @@ assert.eq(indexA1B1, filters[0].indexes[0], 'unexpected first index');
assert.eq(indexA1C1, filters[0].indexes[1], 'unexpected first index');
// Plans for query shape should be removed after setting index filter.
-assert(!planCacheContains(shape), 'plan cache for query shape not flushed after updating filter');
+assert.eq(null, planCacheEntryForQuery(shape), coll.getPlanCache().list());
// Check details of winning plan in plan cache after setting filter and re-executing query.
-assert.eq(1, t.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
-planAfterSetFilter = getPlans(shape)[0];
-print('Winning plan (after setting index filter) = ' + tojson(planAfterSetFilter));
-// Check filterSet field in plan details
-assert.eq(true, planAfterSetFilter.filterSet, 'missing or invalid filterSet field in plan details');
+assert.eq(1, coll.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count');
+planAfterSetFilter = planCacheEntryForQuery(shape);
+assert.neq(null, planAfterSetFilter, coll.getPlanCache().list());
+// Check 'indexFilterSet' field in plan details
+assert.eq(true, planAfterSetFilter.indexFilterSet, planAfterSetFilter);
// Execute query with cursor.hint(). Check that user-provided hint is overridden.
// Applying the index filters will remove the user requested index from the list
// of indexes provided to the planner.
// If the planner still tries to use the user hint, we will get a 'bad hint' error.
-t.find(queryA1, projectionA1).sort(sortA1).hint(indexA1).itcount();
+coll.find(queryA1, projectionA1).sort(sortA1).hint(indexA1).itcount();
// Test that index filters are ignored for idhack queries.
-assert.commandWorked(t.runCommand('planCacheSetFilter', {query: queryID, indexes: [indexA1]}));
-var explain = t.explain("executionStats").find(queryID).finish();
+assert.commandWorked(coll.runCommand('planCacheSetFilter', {query: queryID, indexes: [indexA1]}));
+var explain = coll.explain("executionStats").find(queryID).finish();
assert.commandWorked(explain);
var planStage = getPlanStage(explain.executionStats.executionStages, 'IDHACK');
assert.neq(null, planStage);
@@ -160,12 +158,12 @@ assert.neq(null, planStage);
// Clearing filters on a missing collection should be a no-op.
assert.commandWorked(missingCollection.runCommand('planCacheClearFilters'));
// Clear the filters set earlier.
-assert.commandWorked(t.runCommand('planCacheClearFilters'));
+assert.commandWorked(coll.runCommand('planCacheClearFilters'));
filters = getFilters();
assert.eq(0, filters.length, 'filters not cleared after successful planCacheClearFilters command');
-// Plans should be removed after clearing filters
-assert(!planCacheContains(shape), 'plan cache for query shape not flushed after clearing filters');
+// Plans should be removed after clearing filters.
+assert.eq(null, planCacheEntryForQuery(shape), coll.getPlanCache().list());
print('Plan details before setting filter = ' + tojson(planBeforeSetFilter.details, '', true));
print('Plan details after setting filter = ' + tojson(planAfterSetFilter.details, '', true));
@@ -174,87 +172,85 @@ print('Plan details after setting filter = ' + tojson(planAfterSetFilter.details
// Tests for the 'indexFilterSet' explain field.
//
-if (db.isMaster().msg !== "isdbgrid") {
- // No filter.
- t.getPlanCache().clear();
- assert.eq(false, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
- assert.eq(false,
- t.find(queryA1, projectionA1)
- .sort(sortA1)
- .explain('queryPlanner')
- .queryPlanner.indexFilterSet);
-
- // With one filter set.
- assert.commandWorked(t.runCommand('planCacheSetFilter', {query: {z: 1}, indexes: [{z: 1}]}));
- assert.eq(true, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
- assert.eq(false,
- t.find(queryA1, projectionA1)
- .sort(sortA1)
- .explain('queryPlanner')
- .queryPlanner.indexFilterSet);
-
- // With two filters set.
- assert.commandWorked(t.runCommand(
- 'planCacheSetFilter',
- {query: queryA1, projection: projectionA1, sort: sortA1, indexes: [indexA1B1, indexA1C1]}));
- assert.eq(true, t.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
- assert.eq(true,
- t.find(queryA1, projectionA1)
- .sort(sortA1)
- .explain('queryPlanner')
- .queryPlanner.indexFilterSet);
-}
+// No filter.
+coll.getPlanCache().clear();
+assert.eq(false, coll.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
+assert.eq(false,
+ coll.find(queryA1, projectionA1)
+ .sort(sortA1)
+ .explain('queryPlanner')
+ .queryPlanner.indexFilterSet);
+
+// With one filter set.
+assert.commandWorked(coll.runCommand('planCacheSetFilter', {query: {z: 1}, indexes: [{z: 1}]}));
+assert.eq(true, coll.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
+assert.eq(false,
+ coll.find(queryA1, projectionA1)
+ .sort(sortA1)
+ .explain('queryPlanner')
+ .queryPlanner.indexFilterSet);
+
+// With two filters set.
+assert.commandWorked(coll.runCommand(
+ 'planCacheSetFilter',
+ {query: queryA1, projection: projectionA1, sort: sortA1, indexes: [indexA1B1, indexA1C1]}));
+assert.eq(true, coll.find({z: 1}).explain('queryPlanner').queryPlanner.indexFilterSet);
+assert.eq(true,
+ coll.find(queryA1, projectionA1)
+ .sort(sortA1)
+ .explain('queryPlanner')
+ .queryPlanner.indexFilterSet);
//
// Tests for index filter commands and multiple indexes with the same key pattern.
//
-t.drop();
+coll.drop();
var collationEN = {locale: "en_US"};
-assert.commandWorked(t.createIndex(indexA1, {collation: collationEN, name: "a_1:en_US"}));
-assert.commandWorked(t.createIndex(indexA1, {name: "a_1"}));
+assert.commandWorked(coll.createIndex(indexA1, {collation: collationEN, name: "a_1:en_US"}));
+assert.commandWorked(coll.createIndex(indexA1, {name: "a_1"}));
-assert.commandWorked(t.insert({a: "a"}));
+assert.commandWorked(coll.insert({a: "a"}));
-assert.commandWorked(t.runCommand('planCacheSetFilter', {query: queryAA, indexes: [indexA1]}));
+assert.commandWorked(coll.runCommand('planCacheSetFilter', {query: queryAA, indexes: [indexA1]}));
-assert.commandWorked(t.runCommand('planCacheSetFilter',
- {query: queryAA, collation: collationEN, indexes: [indexA1]}));
+assert.commandWorked(coll.runCommand('planCacheSetFilter',
+ {query: queryAA, collation: collationEN, indexes: [indexA1]}));
// Ensure that index key patterns in planCacheSetFilter select any index with a matching key
// pattern.
-explain = t.find(queryAA).explain();
+explain = coll.find(queryAA).explain();
assert(isIxscan(db, explain.queryPlanner.winningPlan), "Expected index scan: " + tojson(explain));
-explain = t.find(queryAA).collation(collationEN).explain();
+explain = coll.find(queryAA).collation(collationEN).explain();
assert(isIxscan(db, explain.queryPlanner.winningPlan), "Expected index scan: " + tojson(explain));
// Ensure that index names in planCacheSetFilter only select matching names.
-assert.commandWorked(
- t.runCommand('planCacheSetFilter', {query: queryAA, collation: collationEN, indexes: ["a_1"]}));
+assert.commandWorked(coll.runCommand('planCacheSetFilter',
+ {query: queryAA, collation: collationEN, indexes: ["a_1"]}));
-explain = t.find(queryAA).collation(collationEN).explain();
+explain = coll.find(queryAA).collation(collationEN).explain();
assert(isCollscan(db, explain.queryPlanner.winningPlan), "Expected collscan: " + tojson(explain));
//
// Test that planCacheSetFilter and planCacheClearFilters allow queries containing $expr.
//
-t.drop();
-assert.commandWorked(t.insert({a: "a"}));
-assert.commandWorked(t.createIndex(indexA1, {name: "a_1"}));
+coll.drop();
+assert.commandWorked(coll.insert({a: "a"}));
+assert.commandWorked(coll.createIndex(indexA1, {name: "a_1"}));
-assert.commandWorked(t.runCommand(
+assert.commandWorked(coll.runCommand(
"planCacheSetFilter", {query: {a: "a", $expr: {$eq: ["$a", "a"]}}, indexes: [indexA1]}));
filters = getFilters();
assert.eq(1, filters.length, tojson(filters));
assert.eq({a: "a", $expr: {$eq: ["$a", "a"]}}, filters[0].query, tojson(filters[0]));
assert.commandWorked(
- t.runCommand("planCacheClearFilters", {query: {a: "a", $expr: {$eq: ["$a", "a"]}}}));
+ coll.runCommand("planCacheClearFilters", {query: {a: "a", $expr: {$eq: ["$a", "a"]}}}));
filters = getFilters();
assert.eq(0, filters.length, tojson(filters));
@@ -263,17 +259,18 @@ assert.eq(0, filters.length, tojson(filters));
// unbound variables.
//
-t.drop();
-assert.commandWorked(t.insert({a: "a"}));
-assert.commandWorked(t.createIndex(indexA1, {name: "a_1"}));
+coll.drop();
+assert.commandWorked(coll.insert({a: "a"}));
+assert.commandWorked(coll.createIndex(indexA1, {name: "a_1"}));
assert.commandFailed(
- t.runCommand("planCacheSetFilter",
- {query: {a: "a", $expr: {$eq: ["$a", "$$unbound"]}}, indexes: [indexA1]}));
+ coll.runCommand("planCacheSetFilter",
+ {query: {a: "a", $expr: {$eq: ["$a", "$$unbound"]}}, indexes: [indexA1]}));
filters = getFilters();
assert.eq(0, filters.length, tojson(filters));
assert.commandFailed(
- t.runCommand("planCacheClearFilters", {query: {a: "a", $expr: {$eq: ["$a", "$$unbound"]}}}));
+ coll.runCommand("planCacheClearFilters", {query: {a: "a", $expr: {$eq: ["$a", "$$unbound"]}}}));
filters = getFilters();
assert.eq(0, filters.length, tojson(filters));
+}());
diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js
index 5e2b6a49ef7..455e3ee0603 100644
--- a/jstests/core/operation_latency_histogram.js
+++ b/jstests/core/operation_latency_histogram.js
@@ -167,7 +167,7 @@ testColl.dataSize();
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
// PlanCache
-testColl.getPlanCache().listQueryShapes();
+testColl.getPlanCache().clear();
lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1);
// Commands which occur on the database only should not effect the collection stats.
diff --git a/jstests/core/plan_cache_clear.js b/jstests/core/plan_cache_clear.js
index 8170aa9595d..375facc100d 100644
--- a/jstests/core/plan_cache_clear.js
+++ b/jstests/core/plan_cache_clear.js
@@ -7,67 +7,92 @@
// # latter must be routed to the primary.
// # If all chunks are moved off of a shard, it can cause the plan cache to miss commands.
// assumes_read_preference_unchanged,
+// assumes_read_concern_unchanged,
// does_not_support_stepdowns,
// assumes_balancer_off,
+// assumes_against_mongod_not_mongos,
// ]
(function() {
-var t = db.jstests_plan_cache_clear;
-t.drop();
-
-// Utility function to list query shapes in cache.
-function getShapes(collection) {
- if (collection == undefined) {
- collection = t;
- }
- var res = collection.runCommand('planCacheListQueryShapes');
- print('planCacheListQueryShapes() = ' + tojson(res));
- assert.commandWorked(res, 'planCacheListQueryShapes failed');
- assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
- return res.shapes;
+const coll = db.jstests_plan_cache_clear;
+coll.drop();
+
+function numPlanCacheEntries() {
+ return coll.aggregate([{$planCacheStats: {}}]).itcount();
+}
+
+function dumpPlanCacheState() {
+ return coll.aggregate([{$planCacheStats: {}}]).toArray();
}
-t.save({a: 1, b: 1});
-t.save({a: 1, b: 2});
-t.save({a: 1, b: 2});
-t.save({a: 2, b: 2});
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
// We need two indices so that the MultiPlanRunner is executed.
-t.ensureIndex({a: 1});
-t.ensureIndex({a: 1, b: 1});
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
// Run a query so that an entry is inserted into the cache.
-assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
+assert.eq(1, coll.find({a: 1, b: 1}).itcount());
// Invalid key should be a no-op.
-assert.commandWorked(t.runCommand('planCacheClear', {query: {unknownfield: 1}}));
-assert.eq(1, getShapes().length, 'removing unknown query should not affecting exisiting entries');
+assert.commandWorked(coll.runCommand('planCacheClear', {query: {unknownfield: 1}}));
+assert.eq(1, numPlanCacheEntries(), dumpPlanCacheState());
+
+// Introduce a second plan cache entry.
+assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
+assert.eq(2, numPlanCacheEntries(), dumpPlanCacheState());
-// Run a new query shape and drop it from the cache
-assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query');
-assert.commandWorked(t.runCommand('planCacheClear', {query: {a: 1, b: 1}}));
-assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache');
+// Drop one of the two shapes from the cache.
+assert.commandWorked(coll.runCommand('planCacheClear', {query: {a: 1, b: 1}}),
+ dumpPlanCacheState());
+assert.eq(1, numPlanCacheEntries(), dumpPlanCacheState());
+
+// Drop the second shape from the cache.
+assert.commandWorked(coll.runCommand('planCacheClear', {query: {a: 1, b: 1, c: 1}}),
+ dumpPlanCacheState());
+assert.eq(0, numPlanCacheEntries(), dumpPlanCacheState());
// planCacheClear can clear $expr queries.
-assert.eq(1, t.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount(), 'unexpected document count');
-assert.eq(1, getShapes().length, 'unexpected cache size after running 2nd query');
+assert.eq(1, coll.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount());
+assert.eq(1, numPlanCacheEntries(), dumpPlanCacheState());
assert.commandWorked(
- t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}}));
-assert.eq(0, getShapes().length, 'unexpected cache size after dropping 2nd query from cache');
+ coll.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}}));
+assert.eq(0, numPlanCacheEntries(), dumpPlanCacheState());
// planCacheClear fails with an $expr query with an unbound variable.
assert.commandFailed(
- t.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}}));
+ coll.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}}));
// Insert two more shapes into the cache.
-assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
-assert.eq(1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount(), 'unexpected document count');
+assert.eq(1, coll.find({a: 1, b: 1}).itcount());
+assert.eq(1, coll.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount());
+assert.eq(2, numPlanCacheEntries(), dumpPlanCacheState());
+
+// Error cases.
+assert.commandFailedWithCode(coll.runCommand('planCacheClear', {query: 12345}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(coll.runCommand('planCacheClear', {query: /regex/}),
+ ErrorCodes.BadValue);
+assert.commandFailedWithCode(coll.runCommand('planCacheClear', {query: {a: {$no_such_op: 1}}}),
+ ErrorCodes.BadValue);
+// 'sort' parameter is not allowed without 'query' parameter.
+assert.commandFailedWithCode(coll.runCommand('planCacheClear', {sort: {a: 1}}),
+ ErrorCodes.BadValue);
+// 'projection' parameter is not allowed without 'query' parameter.
+assert.commandFailedWithCode(coll.runCommand('planCacheClear', {projection: {_id: 0, a: 1}}),
+ ErrorCodes.BadValue);
// Drop query cache. This clears all cached queries in the collection.
-res = t.runCommand('planCacheClear');
-print('planCacheClear() = ' + tojson(res));
-assert.commandWorked(res, 'planCacheClear failed');
-assert.eq(0, getShapes().length, 'plan cache should be empty after successful planCacheClear()');
+assert.commandWorked(coll.runCommand('planCacheClear'));
+assert.eq(0, numPlanCacheEntries(), dumpPlanCacheState());
+
+// Clearing the plan cache for a non-existent collection should succeed.
+const nonExistentColl = db.plan_cache_clear_nonexistent;
+nonExistentColl.drop();
+assert.commandWorked(nonExistentColl.runCommand('planCacheClear'));
//
// Query Plan Revision
@@ -86,11 +111,10 @@ assert.eq(0, getShapes().length, 'plan cache should be empty after successful pl
// Confirm that cache is empty.
const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid;
if (!isMongos) {
- assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
- assert.eq(1, getShapes().length, 'plan cache should not be empty after query');
- res = t.reIndex();
- print('reIndex result = ' + tojson(res));
- assert.eq(0, getShapes().length, 'plan cache should be empty after reIndex operation');
+ assert.eq(1, coll.find({a: 1, b: 1}).itcount());
+ assert.eq(1, numPlanCacheEntries(), dumpPlanCacheState());
+ assert.commandWorked(coll.reIndex());
+ assert.eq(0, numPlanCacheEntries(), dumpPlanCacheState());
}
// Case 2: You add or drop an index.
@@ -98,10 +122,10 @@ if (!isMongos) {
// Populate the cache with 1 entry.
// Add an index.
// Confirm that cache is empty.
-assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
-assert.eq(1, getShapes().length, 'plan cache should not be empty after query');
-t.ensureIndex({b: 1});
-assert.eq(0, getShapes().length, 'plan cache should be empty after adding index');
+assert.eq(1, coll.find({a: 1, b: 1}).itcount());
+assert.eq(1, numPlanCacheEntries(), dumpPlanCacheState());
+assert.commandWorked(coll.createIndex({b: 1}));
+assert.eq(0, numPlanCacheEntries(), dumpPlanCacheState());
// Case 3: The mongod process restarts
// Not applicable.
diff --git a/jstests/core/plan_cache_list_plans.js b/jstests/core/plan_cache_list_plans.js
index a077f9fafbe..f06ca63dd11 100644
--- a/jstests/core/plan_cache_list_plans.js
+++ b/jstests/core/plan_cache_list_plans.js
@@ -1,84 +1,103 @@
-// Test the planCacheListPlans command.
+// Tests for using $planCacheStats to list cached plans.
//
// @tags: [
// # This test attempts to perform queries and introspect the server's plan cache entries. The
// # former operation may be routed to a secondary in the replica set, whereas the latter must be
// # routed to the primary.
// assumes_read_preference_unchanged,
+// assumes_read_concern_unchanged,
// does_not_support_stepdowns,
// # If the balancer is on and chunks are moved, the plan cache can have entries with isActive:
// # false when the test assumes they are true because the query has already been run many times.
// assumes_balancer_off,
// inspects_whether_plan_cache_entry_is_active,
+// assumes_against_mongod_not_mongos,
// ]
(function() {
"use strict";
-let t = db.jstests_plan_cache_list_plans;
-t.drop();
+let coll = db.jstests_plan_cache_list_plans;
+coll.drop();
+
+function dumpPlanCacheState() {
+ return coll.aggregate([{$planCacheStats: {}}]).toArray();
+}
function getPlansForCacheEntry(query, sort, projection) {
- let key = {query: query, sort: sort, projection: projection};
- let res = t.runCommand('planCacheListPlans', key);
- assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'),
- 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
- return res;
+ const match = {
+ 'createdFromQuery.query': query,
+ 'createdFromQuery.sort': sort,
+ 'createdFromQuery.projection': projection
+ };
+ const res = coll.aggregate([{$planCacheStats: {}}, {$match: match}]).toArray();
+ // We expect exactly one matching cache entry.
+ assert.eq(1, res.length, dumpPlanCacheState());
+ return res[0];
+}
+
+function assertNoCacheEntry(query, sort, projection) {
+ const match = {
+ 'createdFromQuery.query': query,
+ 'createdFromQuery.sort': sort,
+ 'createdFromQuery.projection': projection
+ };
+ assert.eq(0,
+ coll.aggregate([{$planCacheStats: {}}, {$match: match}]).itcount(),
+ dumpPlanCacheState());
}
// Assert that timeOfCreation exists in the cache entry. The difference between the current time
// and the time a plan was cached should not be larger than an hour.
function checkTimeOfCreation(query, sort, projection, date) {
- let key = {query: query, sort: sort, projection: projection};
- let res = t.runCommand('planCacheListPlans', key);
- assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('timeOfCreation'), 'timeOfCreation missing from planCacheListPlans');
+ const match = {
+ 'createdFromQuery.query': query,
+ 'createdFromQuery.sort': sort,
+ 'createdFromQuery.projection': projection
+ };
+ const res = coll.aggregate([{$planCacheStats: {}}, {$match: match}]).toArray();
+ // We expect exactly one matching cache entry.
+ assert.eq(1, res.length, res);
+ const cacheEntry = res[0];
+
+ assert(cacheEntry.hasOwnProperty('timeOfCreation'), cacheEntry);
let kMillisecondsPerHour = 1000 * 60 * 60;
- assert.lte(Math.abs(date - res.timeOfCreation.getTime()),
- kMillisecondsPerHour,
- 'timeOfCreation value is incorrect');
+ assert.lte(
+ Math.abs(date - cacheEntry.timeOfCreation.getTime()), kMillisecondsPerHour, cacheEntry);
}
-assert.commandWorked(t.save({a: 1, b: 1}));
-assert.commandWorked(t.save({a: 1, b: 2}));
-assert.commandWorked(t.save({a: 1, b: 2}));
-assert.commandWorked(t.save({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
// We need two indices so that the MultiPlanRunner is executed.
-assert.commandWorked(t.ensureIndex({a: 1}));
-assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-// Invalid key should be an error.
-assert.eq([],
- getPlansForCacheEntry({unknownfield: 1}, {}, {}).plans,
- 'planCacheListPlans should return empty results on unknown query shape');
+// Check that there are no cache entries associated with an unknown field.
+assertNoCacheEntry({unknownfield: 1}, {}, {});
// Create a cache entry.
-assert.eq(
- 1, t.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'unexpected document count');
+assert.eq(1,
+ coll.find({a: 1, b: 1}, {_id: 0, a: 1}).sort({a: -1}).itcount(),
+ 'unexpected document count');
+// Verify that the time of creation listed for the plan cache entry is reasonably close to 'now'.
let now = (new Date()).getTime();
checkTimeOfCreation({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1}, now);
// Retrieve plans for valid cache entry.
let entry = getPlansForCacheEntry({a: 1, b: 1}, {a: -1}, {_id: 0, a: 1});
-assert(entry.hasOwnProperty('works'),
- 'works missing from planCacheListPlans() result ' + tojson(entry));
+assert(entry.hasOwnProperty('works'), entry);
assert.eq(entry.isActive, false);
-let plans = entry.plans;
-assert.eq(2, plans.length, 'unexpected number of plans cached for query');
-
-// Print every plan.
-// Plan details/feedback verified separately in section after Query Plan Revision tests.
-print('planCacheListPlans result:');
-for (let i = 0; i < plans.length; i++) {
- print('plan ' + i + ': ' + tojson(plans[i]));
-}
+// We expect that there were two candidate plans evaluated when the cache entry was created.
+assert(entry.hasOwnProperty("creationExecStats"), entry);
+assert.eq(2, entry.creationExecStats.length, entry);
// Test the queryHash and planCacheKey property by comparing entries for two different
// query shapes.
-assert.eq(0, t.find({a: 132}).sort({b: -1, a: 1}).itcount(), 'unexpected document count');
+assert.eq(0, coll.find({a: 123}).sort({b: -1, a: 1}).itcount(), 'unexpected document count');
let entryNewShape = getPlansForCacheEntry({a: 123}, {b: -1, a: 1}, {});
assert.eq(entry.hasOwnProperty("queryHash"), true);
assert.eq(entryNewShape.hasOwnProperty("queryHash"), true);
@@ -87,41 +106,34 @@ assert.eq(entry.hasOwnProperty("planCacheKey"), true);
assert.eq(entryNewShape.hasOwnProperty("planCacheKey"), true);
assert.neq(entry["planCacheKey"], entryNewShape["planCacheKey"]);
-//
-// Tests for plan reason and feedback in planCacheListPlans
-//
-
// Generate more plans for test query by adding indexes (compound and sparse). This will also
// clear the plan cache.
-assert.commandWorked(t.ensureIndex({a: -1}, {sparse: true}));
-assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({a: -1}, {sparse: true}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
-// Implementation note: feedback stats is calculated after 20 executions. See
-// PlanCacheEntry::kMaxFeedback.
let numExecutions = 100;
for (let i = 0; i < numExecutions; i++) {
- assert.eq(0, t.find({a: 3, b: 3}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'query failed');
+ assert.eq(0, coll.find({a: 3, b: 3}, {_id: 0, a: 1}).sort({a: -1}).itcount(), 'query failed');
}
+// Verify that the time of creation listed for the plan cache entry is reasonably close to 'now'.
now = (new Date()).getTime();
checkTimeOfCreation({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1}, now);
+// Test that the cache entry is listed as active.
entry = getPlansForCacheEntry({a: 3, b: 3}, {a: -1}, {_id: 0, a: 1});
-assert(entry.hasOwnProperty('works'), 'works missing from planCacheListPlans() result');
+assert(entry.hasOwnProperty('works'), entry);
assert.eq(entry.isActive, true);
-plans = entry.plans;
-// This should be obvious but feedback is available only for the first (winning) plan.
-print('planCacheListPlans result (after adding indexes and completing 20 executions):');
-for (let i = 0; i < plans.length; i++) {
- print('plan ' + i + ': ' + tojson(plans[i]));
- assert.gt(plans[i].reason.score, 0, 'plan ' + i + ' score is invalid');
+// There should be the same number of candidate plan scores as candidate plans.
+assert.eq(entry.creationExecStats.length, entry.candidatePlanScores.length, entry);
+
+// Scores should be greater than zero and sorted descending.
+for (let i = 0; i < entry.candidatePlanScores.length; ++i) {
+ const scores = entry.candidatePlanScores;
+ assert.gt(scores[i], 0, entry);
if (i > 0) {
- assert.lte(plans[i].reason.score,
- plans[i - 1].reason.score,
- 'plans not sorted by score in descending order. ' +
- 'plan ' + i + ' has a score that is greater than that of the previous plan');
+ assert.lte(scores[i], scores[i - 1], entry);
}
- assert(plans[i].reason.stats.hasOwnProperty('stage'), 'no stats inserted for plan ' + i);
}
})();
diff --git a/jstests/core/plan_cache_list_shapes.js b/jstests/core/plan_cache_list_shapes.js
index 89b9c900354..4a6b4ccc3a9 100644
--- a/jstests/core/plan_cache_list_shapes.js
+++ b/jstests/core/plan_cache_list_shapes.js
@@ -1,5 +1,5 @@
-// Test the planCacheListQueryShapes command, which returns a list of query shapes
-// for the queries currently cached in the collection.
+// Test using the $planCacheStats aggregation metadata source to list all of the query shapes cached
+// for a particular collection.
//
// @tags: [
// # This test attempts to perform queries with plan cache filters set up. The former operation
@@ -7,63 +7,47 @@
// # primary.
// # If all chunks are moved off of a shard, it can cause the plan cache to miss commands.
// assumes_read_preference_unchanged,
+// assumes_read_concern_unchanged,
// does_not_support_stepdowns,
// assumes_balancer_off,
+// assumes_against_mongod_not_mongos,
// ]
(function() {
-const t = db.jstests_plan_cache_list_shapes;
-t.drop();
+const coll = db.jstests_plan_cache_list_shapes;
+coll.drop();
-// Utility function to list query shapes in cache.
-function getShapes(collection) {
- if (collection === undefined) {
- collection = t;
- }
- const res = collection.runCommand('planCacheListQueryShapes');
- print('planCacheListQueryShapes() = ' + tojson(res));
- assert.commandWorked(res, 'planCacheListQueryShapes failed');
- assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
- return res.shapes;
+function dumpPlanCacheState() {
+ return coll.aggregate([{$planCacheStats: {}}]).toArray();
}
-// Attempting to retrieve cache information on non-existent collection is not an error and
-// should return an empty array of query shapes.
-const missingCollection = db.jstests_query_cache_missing;
-missingCollection.drop();
-assert.eq(0,
- getShapes(missingCollection).length,
- 'planCacheListQueryShapes should return empty array on non-existent collection');
+// Utility function to list query shapes in cache.
+function getCachedQueryShapes() {
+ return coll.aggregate([{$planCacheStats: {}}, {$replaceWith: '$createdFromQuery'}]).toArray();
+}
-assert.commandWorked(t.save({a: 1, b: 1}));
-assert.commandWorked(t.save({a: 1, b: 2}));
-assert.commandWorked(t.save({a: 1, b: 2}));
-assert.commandWorked(t.save({a: 2, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 1, b: 2}));
+assert.commandWorked(coll.insert({a: 2, b: 2}));
// We need two indices so that the MultiPlanRunner is executed.
-assert.commandWorked(t.ensureIndex({a: 1}));
-assert.commandWorked(t.ensureIndex({a: 1, b: 1}));
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({a: 1, b: 1}));
// Run a query.
-assert.eq(
- 1, t.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount(), 'unexpected document count');
+assert.eq(1, coll.find({a: 1, b: 1}, {_id: 1, a: 1}).sort({a: -1}).itcount());
// We now expect the two indices to be compared and a cache entry to exist. Retrieve query
// shapes from the test collection Number of shapes should match queries executed by multi-plan
// runner.
-let shapes = getShapes();
-assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
-// Since the queryHash is computed in the server, we filter it out when matching query shapes
-// here.
-let filteredShape0 = shapes[0];
-delete filteredShape0.queryHash;
-assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}},
- filteredShape0,
- 'unexpected query shape returned from planCacheListQueryShapes');
+let shapes = getCachedQueryShapes();
+assert.eq(1, shapes.length, dumpPlanCacheState());
+assert.eq({query: {a: 1, b: 1}, sort: {a: -1}, projection: {_id: 1, a: 1}}, shapes[0]);
// Running a different query shape should cause another entry to be cached.
-assert.eq(1, t.find({a: 1, b: 1}).itcount(), 'unexpected document count');
-shapes = getShapes();
-assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+assert.eq(1, coll.find({a: 1, b: 1}).itcount());
+shapes = dumpPlanCacheState();
+assert.eq(2, shapes.length, shapes);
// Check that each shape has a unique queryHash.
assert.neq(shapes[0]["queryHash"], shapes[1]["queryHash"]);
@@ -72,9 +56,9 @@ assert.neq(shapes[0]["queryHash"], shapes[1]["queryHash"]);
// Insert some documents with strings so we have something to search for.
for (let i = 0; i < 5; i++) {
- assert.commandWorked(t.insert({a: 3, s: 'hello world'}));
+ assert.commandWorked(coll.insert({a: 3, s: 'hello world'}));
}
-assert.commandWorked(t.insert({a: 3, s: 'hElLo wOrLd'}));
+assert.commandWorked(coll.insert({a: 3, s: 'hElLo wOrLd'}));
// Run a query with a regex. Also must include 'a' so that the query may use more than one
// index, and thus, must use the MultiPlanner.
@@ -82,14 +66,16 @@ const regexQuery = {
s: {$regex: 'hello world', $options: 'm'},
a: 3
};
-assert.eq(5, t.find(regexQuery).itcount());
+assert.eq(5, coll.find(regexQuery).itcount());
-assert.eq(3, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result ');
+shapes = getCachedQueryShapes();
+assert.eq(3, shapes.length, shapes);
// Run the same query, but with different regex options. We expect that this should cause a
// shape to get added.
regexQuery.s.$options = 'mi';
// There is one more result since the query is now case sensitive.
-assert.eq(6, t.find(regexQuery).itcount());
-assert.eq(4, getShapes().length, 'unexpected number of shapes in planCacheListQueryShapes result');
+assert.eq(6, coll.find(regexQuery).itcount());
+shapes = getCachedQueryShapes();
+assert.eq(4, shapes.length, shapes);
})();
diff --git a/jstests/core/plan_cache_shell_helpers.js b/jstests/core/plan_cache_shell_helpers.js
index e95a6661f34..9793f2e9811 100644
--- a/jstests/core/plan_cache_shell_helpers.js
+++ b/jstests/core/plan_cache_shell_helpers.js
@@ -6,73 +6,44 @@
// # routed to the primary.
// # If all chunks are moved off of a shard, it can cause the plan cache to miss commands.
// assumes_read_preference_unchanged,
+// assumes_read_concern_unchanged,
// does_not_support_stepdowns,
// assumes_balancer_off,
+// assumes_against_mongod_not_mongos,
// ]
+(function() {
+var coll = db.jstests_plan_cache_shell_helpers;
+coll.drop();
-var t = db.jstests_plan_cache_shell_helpers;
-t.drop();
-
-// Utility function to list query shapes in cache.
-function getShapes(collection) {
- if (collection == undefined) {
- collection = t;
- }
- var res = collection.runCommand('planCacheListQueryShapes');
- print('planCacheListQueryShapes() = ' + tojson(res));
- assert.commandWorked(res, 'planCacheListQueryShapes failed');
- assert(res.hasOwnProperty('shapes'), 'shapes missing from planCacheListQueryShapes result');
- return res.shapes;
-}
-// Utility function to list plans for a query.
-function getPlans(query, sort, projection) {
- var key = {query: query, sort: sort, projection: projection};
- var res = t.runCommand('planCacheListPlans', key);
- assert.commandWorked(res, 'planCacheListPlans(' + tojson(key, '', true) + ' failed');
- assert(res.hasOwnProperty('plans'),
- 'plans missing from planCacheListPlans(' + tojson(key, '', true) + ') result');
- return res.plans;
-}
-
-function assertEmptyCache(cache, errMsg) {
- assert.eq(0, cache.listQueryShapes().length, errMsg + '\ncache contained: \n ', +tojson(cache));
-}
-
-function assertQueryNotInCache(cache, query, errMsg) {
- assert.eq(0,
- planCache.getPlansByQuery(query).plans.length,
- errMsg + '\ncache contained: \n ',
- +tojson(cache));
-}
-
-function assertCacheLength(cache, length, errMsg) {
- assert.eq(length, cache.length, errMsg + '\ncache contained: \n ', +tojson(cache));
+function assertCacheLength(length) {
+ const cacheContents = coll.getPlanCache().list();
+ assert.eq(length, cacheContents.length, cacheContents);
}
-// Add data an indices.
+// Add data and indices.
var n = 200;
for (var i = 0; i < n; i++) {
- t.save({a: i, b: -1, c: 1});
+ assert.commandWorked(coll.insert({a: i, b: -1, c: 1}));
}
-t.ensureIndex({a: 1});
-t.ensureIndex({b: 1});
+assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(coll.createIndex({b: 1}));
// Populate plan cache.
var queryB = {a: {$gte: 199}, b: -1};
var projectionB = {_id: 0, b: 1};
var sortC = {c: -1};
-assert.eq(1, t.find(queryB, projectionB).sort(sortC).itcount(), 'unexpected document count');
-assert.eq(1, t.find(queryB, projectionB).itcount(), 'unexpected document count');
-assert.eq(1, t.find(queryB).sort(sortC).itcount(), 'unexpected document count');
-assert.eq(1, t.find(queryB).itcount(), 'unexpected document count');
-assertCacheLength(getShapes(), 4, 'unexpected number of query shapes in plan cache');
+assert.eq(1, coll.find(queryB, projectionB).sort(sortC).itcount(), 'unexpected document count');
+assert.eq(1, coll.find(queryB, projectionB).itcount(), 'unexpected document count');
+assert.eq(1, coll.find(queryB).sort(sortC).itcount(), 'unexpected document count');
+assert.eq(1, coll.find(queryB).itcount(), 'unexpected document count');
+assertCacheLength(4);
//
// PlanCache.getName
//
-var planCache = t.getPlanCache();
-assert.eq(t.getName(), planCache.getName(), 'name of plan cache should match collection');
+var planCache = coll.getPlanCache();
+assert.eq(coll.getName(), planCache.getName(), 'name of plan cache should match collection');
//
// PlanCache.help
@@ -87,72 +58,32 @@ print('plan cache:');
print(planCache);
//
-// collection.getPlanCache().listQueryShapes
+// collection.getPlanCache().list
//
var missingCollection = db.jstests_plan_cache_missing;
missingCollection.drop();
-// should return empty array on non-existent collection.
-assertEmptyCache(missingCollection.getPlanCache(),
- 'collection.getPlanCache().listQueryShapes() should return empty results ' +
- 'on non-existent collection');
-assert.eq(getShapes(),
- planCache.listQueryShapes(),
- 'unexpected collection.getPlanCache().listQueryShapes() shell helper result');
-
-//
-// collection.getPlanCache().getPlansByQuery
-//
-
-// should return empty array on non-existent query shape.
-assertQueryNotInCache(planCache,
- {unknownfield: 1},
- 'collection.getPlanCache().getPlansByQuery() should return empty results ' +
- 'on non-existent collection');
-// should error on missing required field query.
-assert.throws(function() {
- planCache.getPlansByQuery();
-});
-
-// Invoke with various permutations of required (query) and optional (projection, sort) arguments.
-assert.eq(getPlans(queryB, sortC, projectionB),
- planCache.getPlansByQuery(queryB, projectionB, sortC).plans,
- 'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-assert.eq(getPlans(queryB, {}, projectionB),
- planCache.getPlansByQuery(queryB, projectionB).plans,
- 'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-assert.eq(getPlans(queryB, sortC, {}),
- planCache.getPlansByQuery(queryB, undefined, sortC).plans,
- 'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-assert.eq(getPlans(queryB, {}, {}),
- planCache.getPlansByQuery(queryB).plans,
- 'plans from collection.getPlanCache().getPlansByQuery() different from command result');
-
-// getPlansByQuery() will also accept a single argument with the query shape object
-// as an alternative to specifying the query, sort and projection parameters separately.
-// Format of query shape object:
-// {
-// query: <query>,
-// projection: <projection>,
-// sort: <sort>
-// }
-var shapeB = {query: queryB, projection: projectionB, sort: sortC};
-assert.eq(getPlans(queryB, sortC, projectionB),
- planCache.getPlansByQuery(shapeB).plans,
- 'collection.getPlanCache().getPlansByQuery() did not accept query shape object');
-
-// Should return empty array on missing or extra fields in query shape object.
-// The entire invalid query shape object will be passed to the command
-// as the 'query' component which will result in the server returning an empty
-// array of plans.
-assertQueryNotInCache(planCache,
- {query: queryB},
- 'collection.getPlanCache.getPlansByQuery should return empty results on ' +
- 'incomplete query shape');
-assertQueryNotInCache(planCache,
- {query: queryB, sort: sortC, projection: projectionB, unknown_field: 1},
- 'collection.getPlanCache.getPlansByQuery should return empty results on ' +
- 'invalid query shape');
+// Listing the cache for a non-existing collection is expected to fail by throwing.
+assert.throws(() => missingCollection.getPlanCache().list());
+
+// Test that we can use $group and $count with the list() helper.
+assert.eq([{_id: null, count: 4}],
+ planCache.list([{$group: {_id: null, count: {$sum: 1}}}]),
+ planCache.list());
+assert.eq([{count: 4}], planCache.list([{$count: "count"}]), planCache.list());
+
+// Test that we can collect descriptions of all the queries that created cache entries using the
+// list() helper. Also verify that these are listed in order of most recently created to least
+// recently created.
+assert.eq(
+ [
+ {query: queryB, sort: {}, projection: {}},
+ {query: queryB, sort: sortC, projection: {}},
+ {query: queryB, sort: {}, projection: projectionB},
+ {query: queryB, sort: sortC, projection: projectionB}
+ ],
+ planCache.list([{$sort: {timeOfCreation: -1}}, {$replaceWith: "$createdFromQuery"}]),
+ planCache.list());
//
// collection.getPlanCache().clearPlansByQuery
@@ -167,25 +98,16 @@ assert.throws(function() {
// Invoke with various permutations of required (query) and optional (projection, sort) arguments.
planCache.clearPlansByQuery(queryB, projectionB);
-assertCacheLength(
- getShapes(),
- 3,
- 'query shape not dropped after running collection.getPlanCache().clearPlansByQuery()');
+assertCacheLength(3);
planCache.clearPlansByQuery(queryB, undefined, sortC);
-assertCacheLength(
- getShapes(),
- 2,
- 'query shape not dropped after running collection.getPlanCache().clearPlansByQuery()');
+assertCacheLength(2);
planCache.clearPlansByQuery(queryB);
-assertCacheLength(
- getShapes(),
- 1,
- 'query shape not dropped after running collection.getPlanCache().clearPlansByQuery()');
+assertCacheLength(1);
planCache.clear();
-assertCacheLength(getShapes(), 0, 'plan cache not empty');
+assertCacheLength(0);
// clearPlansByQuery() will also accept a single argument with the query shape object
// as an alternative to specifying the query, sort and projection parameters separately.
@@ -197,14 +119,11 @@ assertCacheLength(getShapes(), 0, 'plan cache not empty');
// }
// Repopulate cache
-assert.eq(1, t.find(queryB).sort(sortC).itcount(), 'unexpected document count');
+assert.eq(1, coll.find(queryB).sort(sortC).itcount(), 'unexpected document count');
// Clear using query shape object.
planCache.clearPlansByQuery({query: queryB, projection: {}, sort: sortC});
-assertCacheLength(
- getShapes(),
- 0,
- 'plan cache not empty. collection.getPlanCache().clearPlansByQuery did not accept query shape object');
+assertCacheLength(0);
// Should not error on missing or extra fields in query shape object.
planCache.clearPlansByQuery({query: queryB});
@@ -218,22 +137,14 @@ planCache.clearPlansByQuery(
// Should not error on non-existent collection.
missingCollection.getPlanCache().clear();
// Re-populate plan cache with 1 query shape.
-assert.eq(1, t.find(queryB, projectionB).sort(sortC).itcount(), 'unexpected document count');
-assertCacheLength(getShapes(), 1, 'plan cache should not be empty after running cacheable query');
+assert.eq(1, coll.find(queryB, projectionB).sort(sortC).itcount(), 'unexpected document count');
+assertCacheLength(1);
// Clear cache.
planCache.clear();
-assertCacheLength(getShapes(), 0, 'plan cache not empty after clearing');
-
-//
-// explain and plan cache
-// Running explain should not mutate the plan cache.
-//
+assertCacheLength(0);
+// Verify that explaining a find command does not write to the plan cache.
planCache.clear();
-
-// MultiPlanRunner explain
-var multiPlanRunnerExplain = t.find(queryB, projectionB).sort(sortC).explain(true);
-
-print('multi plan runner explain = ' + tojson(multiPlanRunnerExplain));
-
-assertCacheLength(getShapes(), 0, 'explain should not mutate plan cache');
+const explain = coll.find(queryB, projectionB).sort(sortC).explain(true);
+assertCacheLength(0);
+}());
diff --git a/jstests/core/profile_query_hash.js b/jstests/core/profile_query_hash.js
index d08f0de236a..73e27e3bb6c 100644
--- a/jstests/core/profile_query_hash.js
+++ b/jstests/core/profile_query_hash.js
@@ -1,7 +1,11 @@
-// @tags: [does_not_support_stepdowns, requires_profiling]
-
// Confirms that profile entries for find commands contain the appropriate query hash.
-
+//
+// @tags: [
+// does_not_support_stepdowns,
+// requires_profiling,
+// assumes_against_mongod_not_mongos,
+// assumes_read_concern_unchanged,
+// ]
(function() {
"use strict";
@@ -15,11 +19,6 @@ const coll = testDB.test;
// Utility function to list query shapes in cache. The length of the list of query shapes
// returned is used to validate the number of query hashes accumulated.
-function getShapes(collection) {
- const res = collection.runCommand('planCacheListQueryShapes');
- return res.shapes;
-}
-
assert.commandWorked(coll.insert({a: 1, b: 1}));
assert.commandWorked(coll.insert({a: 1, b: 2}));
assert.commandWorked(coll.insert({a: 1, b: 2}));
@@ -39,8 +38,8 @@ assert.eq(1,
const profileObj0 =
getLatestProfilerEntry(testDB, {op: "query", "command.comment": "Query0 find command"});
assert(profileObj0.hasOwnProperty("planCacheKey"), tojson(profileObj0));
-let shapes = getShapes(coll);
-assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+let shapes = coll.getPlanCache().list();
+assert.eq(1, shapes.length, shapes);
// Executes query1 and gets the corresponding system.profile entry.
assert.eq(0,
@@ -52,8 +51,8 @@ assert(profileObj1.hasOwnProperty("planCacheKey"), tojson(profileObj1));
// Since the query shapes are the same, we only expect there to be one query shape present in
// the plan cache commands output.
-shapes = getShapes(coll);
-assert.eq(1, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+shapes = coll.getPlanCache().list();
+assert.eq(1, shapes.length, shapes);
assert.eq(
profileObj0.planCacheKey, profileObj1.planCacheKey, 'unexpected not matching query hashes');
@@ -84,8 +83,8 @@ assert(profileObj2.hasOwnProperty("planCacheKey"), tojson(profileObj2));
// Query0 and query1 should both have the same query hash for the given indexes. Whereas, query2
// should have a unique hash. Asserts that a total of two distinct hashes results in two query
// shapes.
-shapes = getShapes(coll);
-assert.eq(2, shapes.length, 'unexpected number of shapes in planCacheListQueryShapes result');
+shapes = coll.getPlanCache().list();
+assert.eq(2, shapes.length, shapes);
assert.neq(profileObj0.planCacheKey, profileObj2.planCacheKey, 'unexpected matching query hashes');
// The planCacheKey in explain should be different for query2 than the hash from query0 and
diff --git a/jstests/core/restart_catalog.js b/jstests/core/restart_catalog.js
index bf254537239..9062c8132da 100644
--- a/jstests/core/restart_catalog.js
+++ b/jstests/core/restart_catalog.js
@@ -86,8 +86,6 @@ assert.commandWorked(db.adminCommand({restartCatalog: 1}));
// Access the query plan cache. (This makes no assumptions about the state of the plan cache
// after restart; however, the database definitely should not crash.)
[songsColl, artistsColl].forEach(coll => {
- assert.commandWorked(coll.runCommand("planCacheListPlans", {query: {_id: 1}}));
- assert.commandWorked(coll.runCommand("planCacheListQueryShapes"));
assert.commandWorked(coll.runCommand("planCacheClear"));
});
diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js
index 9a7e4f778a8..a1590f35e9c 100644
--- a/jstests/core/views/views_all_commands.js
+++ b/jstests/core/views/views_all_commands.js
@@ -2,11 +2,17 @@
// assumes_superuser_permissions,
// does_not_support_stepdowns,
// requires_fastcount,
+// requires_fcv_44,
// requires_getmore,
// requires_non_retryable_commands,
// requires_non_retryable_writes,
// uses_map_reduce_with_temp_collections,
// ]
+//
+// Tagged as 'requires_fcv_44', since this test cannot run against versions less then 4.4. This is
+// because 'planCacheListPlans' and 'planCacheListQueryShapes' were deleted in 4.4, and thus not
+// tested here. But this test asserts that all commands are covered, so will fail against a version
+// of the server which implements these commands.
/*
* Declaratively-defined tests for views for all database commands. This file contains a map of test
@@ -419,8 +425,6 @@ let viewsCommandTests = {
planCacheClear: {command: {planCacheClear: "view"}, expectFailure: true},
planCacheClearFilters: {command: {planCacheClearFilters: "view"}, expectFailure: true},
planCacheListFilters: {command: {planCacheListFilters: "view"}, expectFailure: true},
- planCacheListPlans: {command: {planCacheListPlans: "view"}, expectFailure: true},
- planCacheListQueryShapes: {command: {planCacheListQueryShapes: "view"}, expectFailure: true},
planCacheSetFilter: {command: {planCacheSetFilter: "view"}, expectFailure: true},
prepareTransaction: {skip: isUnrelated},
profile: {skip: isUnrelated},
diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js
index 11b2f4735ae..f9eee17c6dc 100644
--- a/jstests/libs/override_methods/network_error_and_txn_override.js
+++ b/jstests/libs/override_methods/network_error_and_txn_override.js
@@ -156,8 +156,6 @@ const kNonFailoverTolerantCommands = new Set([
"planCacheClear", // The plan cache isn't replicated.
"planCacheClearFilters",
"planCacheListFilters",
- "planCacheListPlans",
- "planCacheListQueryShapes",
"planCacheSetFilter",
"profile", // Not replicated, so can't tolerate failovers.
"setParameter", // Not replicated, so can't tolerate failovers.
diff --git a/jstests/noPassthrough/global_operation_latency_histogram.js b/jstests/noPassthrough/global_operation_latency_histogram.js
index 2045391c71c..f2f7fdbfa9d 100644
--- a/jstests/noPassthrough/global_operation_latency_histogram.js
+++ b/jstests/noPassthrough/global_operation_latency_histogram.js
@@ -149,8 +149,8 @@ testColl.dataSize();
lastHistogram = checkHistogramDiff(0, 0, 1);
// PlanCache
-testColl.getPlanCache().listQueryShapes();
-lastHistogram = checkHistogramDiff(0, 0, 1);
+testColl.getPlanCache().list();
+lastHistogram = checkHistogramDiff(1, 0, 0);
// ServerStatus
assert.commandWorked(testDB.serverStatus());
diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js
index cc79a81bb25..dd9184689b3 100644
--- a/jstests/noPassthrough/plan_cache_index_create.js
+++ b/jstests/noPassthrough/plan_cache_index_create.js
@@ -23,17 +23,19 @@ function indexBuildIsRunning(testDB, indexName) {
// Returns whether a cached plan exists for 'query'.
function assertDoesNotHaveCachedPlan(coll, query) {
- const key = {query: query};
- const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- assert(cmdRes.hasOwnProperty('plans') && cmdRes.plans.length == 0, tojson(cmdRes));
+ const match = {"createdFromQuery.query": query};
+ assert.eq([], coll.getPlanCache().list([{$match: match}]), coll.getPlanCache().list());
}
// Returns the cached plan for 'query'.
function getIndexNameForCachedPlan(coll, query) {
- const key = {query: query};
- const cmdRes = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
- assert(Array.isArray(cmdRes.plans) && cmdRes.plans.length > 0, tojson(cmdRes));
- return cmdRes.plans[0].reason.stats.inputStage.indexName;
+ const match = {"createdFromQuery.query": query};
+ const plans = coll.getPlanCache().list([{$match: match}]);
+ assert.eq(plans.length, 1, coll.getPlanCache().list());
+ assert(plans[0].hasOwnProperty("cachedPlan"), plans);
+ assert(plans[0].cachedPlan.hasOwnProperty("inputStage"), plans);
+ assert(plans[0].cachedPlan.inputStage.hasOwnProperty("indexName"), plans);
+ return plans[0].cachedPlan.inputStage.indexName;
}
function runTest({rst, readDB, writeDB}) {
diff --git a/jstests/noPassthrough/plan_cache_list_failed_plans.js b/jstests/noPassthrough/plan_cache_list_failed_plans.js
index 13101d7247c..634a64fdbd1 100644
--- a/jstests/noPassthrough/plan_cache_list_failed_plans.js
+++ b/jstests/noPassthrough/plan_cache_list_failed_plans.js
@@ -1,4 +1,4 @@
-// Confirms the planCacheListPlans output format includes information about failed plans.
+// Confirms the $planCacheStats output format includes information about failed plans.
(function() {
"use strict";
@@ -22,20 +22,24 @@ assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({d: 1}));
// Assert that the find command found documents.
-const key = {
- query: {a: 1},
- sort: {d: 1},
- projection: {}
-};
-assert.eq(smallNumber, coll.find(key.query).sort(key.sort).itcount());
-let res = assert.commandWorked(coll.runCommand("planCacheListPlans", key));
-
-// There should have been two plans generated.
-assert.eq(res["plans"].length, 2);
-// The second plan should fail.
-assert.eq(res["plans"][1]["reason"]["failed"], true);
+assert.eq(smallNumber, coll.find({a: 1}).sort({d: 1}).itcount());
+
+// We expect just one plan cache entry.
+const planCacheContents = coll.getPlanCache().list();
+assert.eq(planCacheContents.length, 1, planCacheContents);
+const planCacheEntry = planCacheContents[0];
+
+// There should have been two candidate plans evaluated when the plan cache entry was created.
+const creationExecStats = planCacheEntry.creationExecStats;
+assert.eq(creationExecStats.length, 2, planCacheEntry);
+// We expect that the first plan succeed, and the second failed.
+assert(!creationExecStats[0].hasOwnProperty("failed"), planCacheEntry);
+assert.eq(creationExecStats[1].failed, true, planCacheEntry);
// The failing plan should have a score of 0.
-assert.eq(res["plans"][1]["reason"]["score"], 0);
+const candidatePlanScores = planCacheEntry.candidatePlanScores;
+assert.eq(candidatePlanScores.length, 2, planCacheEntry);
+assert.eq(candidatePlanScores[1], 0, planCacheEntry);
+
MongoRunner.stopMongod(conn);
})();
diff --git a/jstests/noPassthrough/plan_cache_list_plans_new_format.js b/jstests/noPassthrough/plan_cache_list_plans_new_format.js
deleted file mode 100644
index b687529e5ee..00000000000
--- a/jstests/noPassthrough/plan_cache_list_plans_new_format.js
+++ /dev/null
@@ -1,111 +0,0 @@
-// Confirms the planCacheListPlans output format.
-(function() {
-"use strict";
-
-const conn = MongoRunner.runMongod();
-assert.neq(null, conn, "mongod was unable to start up");
-const testDB = conn.getDB("jstests_plan_cache_list_plans_new_format");
-const coll = testDB.test;
-assert.commandWorked(
- testDB.adminCommand({setParameter: 1, internalQueryCacheListPlansNewOutput: true}));
-
-assert.commandWorked(coll.createIndex({a: 1}));
-assert.commandWorked(coll.createIndex({b: 1}));
-
-const testQuery = {
- "a": {"$gte": 0},
- "b": 32
-};
-const testSort = {
- "c": -1
-};
-const testProjection = {};
-
-// Validate planCacheListPlans result fields for a query shape with a corresponding cache entry.
-assert.eq(0, coll.find(testQuery).sort(testSort).itcount());
-let key = {query: testQuery, sort: testSort, projection: testProjection};
-let res = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
-
-// Confirm both the existence and contents of "createdFromQuery".
-assert(res.hasOwnProperty("createdFromQuery"),
- `planCacheListPlans should return a result with
- field "createdFromQuery"`);
-assert.eq(res.createdFromQuery.query,
- testQuery,
- `createdFromQuery should contain field "query"
- with value ${testQuery}, instead got "createdFromQuery": ${res.createdFromQuery}`);
-assert.eq(res.createdFromQuery.sort,
- testSort,
- `createdFromQuery should contain field "sort"
- with value ${testSort}, instead got "createdFromQuery": ${res.createdFromQuery}`);
-assert.eq(res.createdFromQuery.projection, testProjection, `createdFromQuery should contain
- field "projection" with value ${testProjection}, instead got "createdFromQuery":
- ${res.createdFromQuery}`);
-
-// Confirm 'res' contains 'works' and a valid 'queryHash' field.
-assert(res.hasOwnProperty("works"), `planCacheListPlans result is missing "works" field`);
-assert.gt(res.works, 0, `planCacheListPlans result field "works" should be greater than 0`);
-assert(res.hasOwnProperty("queryHash"),
- `planCacheListPlans result is missing "queryHash"
- field`);
-assert.eq(8,
- res.queryHash.length,
- `planCacheListPlans result field "queryHash" should be 8
- characters long`);
-
-// Validate that 'cachedPlan' and 'creationExecStats' fields exist and both have consistent
-// information about the winning plan.
-assert(res.hasOwnProperty("cachedPlan"),
- `planCacheListPlans result is missing field
- "cachedPlan" field`);
-assert(res.hasOwnProperty("creationExecStats"),
- `planCacheListPlans result is missing
- "creationExecStats" field`);
-assert.gte(res.creationExecStats.length,
- 2,
- `creationExecStats should contain stats for both the
- winning plan and all rejected plans. Thus, should contain at least 2 elements but got:
- ${res.creationStats}`);
-let cachedStage = assert(res.cachedPlan.stage, `cachedPlan should have field "stage"`);
-let winningExecStage = assert(res.creationExecStats[0].executionStages,
- `creationExecStats[0]
- should have field "executionStages"`);
-assert.eq(cachedStage,
- winningExecStage,
- `Information about the winning plan in "cachedPlan" is
- inconsistent with the first element in "creationExecStats".`);
-
-// Ensures that the new format preservers information about the failed plans.
-assert(coll.drop());
-
-// Setup the database such that it will generate a failing plan and a succeeding plan.
-const numDocs = 32;
-const smallNumber = 10;
-assert.commandWorked(testDB.adminCommand(
- {setParameter: 1, internalQueryMaxBlockingSortMemoryUsageBytes: smallNumber}));
-for (let i = 0; i < numDocs * 2; ++i)
- assert.commandWorked(coll.insert({a: ((i >= (numDocs * 2) - smallNumber) ? 1 : 0), d: i}));
-
-// Create the indexes to create competing plans.
-assert.commandWorked(coll.createIndex({a: 1}));
-assert.commandWorked(coll.createIndex({d: 1}));
-
-// Assert that the find command found documents.
-key = {
- query: {a: 1},
- sort: {d: 1},
- projection: {}
-};
-assert.eq(smallNumber, coll.find(key.query).sort(key.sort).itcount());
-res = assert.commandWorked(coll.runCommand('planCacheListPlans', key));
-
-// There should have been two plans generated.
-assert.eq(res["creationExecStats"].length, 2);
-
-// The second plan should have failed.
-assert(res["creationExecStats"][1].failed);
-
-// The failing plan should have a score of 0.
-assert.eq(res["candidatePlanScores"][1], 0);
-MongoRunner.stopMongod(conn);
-})();
diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js
index 1523187eba1..138326854f4 100644
--- a/jstests/noPassthrough/query_knobs_validation.js
+++ b/jstests/noPassthrough/query_knobs_validation.js
@@ -19,7 +19,6 @@ const expectedParamDefaults = {
internalQueryCacheEvictionRatio: 10.0,
internalQueryCacheWorksGrowthCoefficient: 2.0,
internalQueryCacheDisableInactiveEntries: false,
- internalQueryCacheListPlansNewOutput: false,
internalQueryPlannerMaxIndexedSolutions: 64,
internalQueryEnumerationMaxOrSolutions: 10,
internalQueryEnumerationMaxIntersectPerAnd: 3,
diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
index 3882a2c4106..5df81821ae7 100644
--- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js
+++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js
@@ -11,14 +11,10 @@ load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Col
const coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");
-function getPlansForCacheEntry(query) {
- let key = {query: query, sort: {}, projection: {}};
- let res = coll.runCommand("planCacheListPlans", key);
- assert.commandWorked(res, `planCacheListPlans(${tojson(key)}) failed`);
- assert(res.hasOwnProperty("plans"),
- `plans missing from planCacheListPlans(${tojson(key)}) failed`);
-
- return res;
+function getPlansForCacheEntry(match) {
+ const matchingCacheEntries = coll.getPlanCache().list([{$match: match}]);
+ assert.eq(matchingCacheEntries.length, 1, coll.getPlanCache().list());
+ return matchingCacheEntries[0];
}
function planHasIxScanStageForKey(planStats, keyPattern) {
@@ -30,11 +26,6 @@ function planHasIxScanStageForKey(planStats, keyPattern) {
return bsonWoCompare(keyPattern, stage.keyPattern) == 0;
}
-const queryShape = {
- a: 1,
- b: 1
-};
-
// Carefully construct a collection so that some queries will do well with an {a: 1} index
// and others with a {b: 1} index.
for (let i = 1000; i < 1100; i++) {
@@ -66,17 +57,21 @@ assert.commandWorked(coll.createIndex({b: 1}));
assert.eq(1, coll.find(bIndexQuery).itcount());
// The plan cache should now hold an inactive entry.
-let entry = getPlansForCacheEntry(queryShape);
+let entry = getPlansForCacheEntry({"createdFromQuery.query": bIndexQuery});
let entryWorks = entry.works;
assert.eq(entry.isActive, false);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {b: 1}), true, entry);
+
+// Get the hash of the query shape so that we keep looking up entries associated with the same shape
+// going forward.
+const queryHash = entry.queryHash;
// Re-run the query. The inactive cache entry should be promoted to an active entry.
assert.eq(1, coll.find(bIndexQuery).itcount());
-entry = getPlansForCacheEntry(queryShape);
+entry = getPlansForCacheEntry({queryHash: queryHash});
assert.eq(entry.isActive, true);
assert.eq(entry.works, entryWorks);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {b: 1}), true, entry);
// Now we will attempt to oscillate the cache entry by interleaving queries which should use
// the {a:1} and {b:1} index. When the plan using the {b: 1} index is in the cache, running a
@@ -87,30 +82,30 @@ assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
// index. The current cache entry will be deactivated, and then the cache entry for the {a: 1}
// will overwrite it (as active).
assert.eq(1, coll.find(aIndexQuery).itcount());
-entry = getPlansForCacheEntry(queryShape);
+entry = getPlansForCacheEntry({queryHash: queryHash});
assert.eq(entry.isActive, true);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {a: 1}), true, entry);
// Run the query which should use the {b: 1} index.
assert.eq(1, coll.find(bIndexQuery).itcount());
-entry = getPlansForCacheEntry(queryShape);
+entry = getPlansForCacheEntry({queryHash: queryHash});
assert.eq(entry.isActive, true);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {b: 1}), true, entry);
// The {b: 1} plan is again in the cache. Run the query which should use the {a: 1}
// index.
assert.eq(1, coll.find(aIndexQuery).itcount());
-entry = getPlansForCacheEntry(queryShape);
+entry = getPlansForCacheEntry({queryHash: queryHash});
assert.eq(entry.isActive, true);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {a: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {a: 1}), true, entry);
// The {a: 1} plan is back in the cache. Run the query which would perform better on the plan
// using the {b: 1} index, and ensure that plan gets written to the cache.
assert.eq(1, coll.find(bIndexQuery).itcount());
-entry = getPlansForCacheEntry(queryShape);
+entry = getPlansForCacheEntry({queryHash: queryHash});
entryWorks = entry.works;
assert.eq(entry.isActive, true);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {b: 1}), true, entry);
// Now run a plan that will perform poorly with both indices (it will be required to scan 500
// documents). This will result in replanning (and the cache entry being deactivated). However,
@@ -122,9 +117,9 @@ for (let i = 0; i < 500; i++) {
assert.eq(500, coll.find({a: 3, b: 3}).itcount());
// The cache entry should have been deactivated.
-entry = getPlansForCacheEntry(queryShape);
+entry = getPlansForCacheEntry({queryHash: queryHash});
assert.eq(entry.isActive, false);
-assert.eq(planHasIxScanStageForKey(entry.plans[0].reason.stats, {b: 1}), true);
+assert.eq(planHasIxScanStageForKey(entry.cachedPlan, {b: 1}), true, entry);
// The works value should have doubled.
assert.eq(entry.works, entryWorks * 2);
diff --git a/jstests/replsets/plan_cache_slaveok.js b/jstests/replsets/plan_cache_slaveok.js
index 1703620a0d5..4ef60d93795 100644
--- a/jstests/replsets/plan_cache_slaveok.js
+++ b/jstests/replsets/plan_cache_slaveok.js
@@ -4,44 +4,32 @@
var name = "plan_cache_slaveok";
function assertPlanCacheCommandsSucceed(db) {
- // .listQueryShapes()
- assert.commandWorked(db.runCommand({planCacheListQueryShapes: name}));
-
- // .getPlansByQuery()
- assert.commandWorked(db.runCommand({planCacheListPlans: name, query: {a: 1}}));
-
- // .clear()
assert.commandWorked(db.runCommand({planCacheClear: name, query: {a: 1}}));
- // setFilter
+ // Using aggregate to list the contents of the plan cache.
+ assert.commandWorked(
+ db.runCommand({aggregate: name, pipeline: [{$planCacheStats: {}}], cursor: {}}));
+
assert.commandWorked(
db.runCommand({planCacheSetFilter: name, query: {a: 1}, indexes: [{a: 1}]}));
- // listFilters
assert.commandWorked(db.runCommand({planCacheListFilters: name}));
- // clearFilters
assert.commandWorked(db.runCommand({planCacheClearFilters: name, query: {a: 1}}));
}
function assertPlanCacheCommandsFail(db) {
- // .listQueryShapes()
- assert.commandFailed(db.runCommand({planCacheListQueryShapes: name}));
-
- // .getPlansByQuery()
- assert.commandFailed(db.runCommand({planCacheListPlans: name, query: {a: 1}}));
-
- // .clear()
assert.commandFailed(db.runCommand({planCacheClear: name, query: {a: 1}}));
- // setFilter
+ // Using aggregate to list the contents of the plan cache.
+ assert.commandFailed(
+ db.runCommand({aggregate: name, pipeline: [{$planCacheStats: {}}], cursor: {}}));
+
assert.commandFailed(
db.runCommand({planCacheSetFilter: name, query: {a: 1}, indexes: [{a: 1}]}));
- // listFilters
assert.commandFailed(db.runCommand({planCacheListFilters: name}));
- // clearFilters
assert.commandFailed(db.runCommand({planCacheClearFilters: name, query: {a: 1}}));
}
diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js
index e87ae073a9a..ef65810722f 100644
--- a/jstests/sharding/database_versioning_all_commands.js
+++ b/jstests/sharding/database_versioning_all_commands.js
@@ -620,29 +620,6 @@ let testCases = {
}
}
},
- planCacheListPlans: {
- run: {
- sendsDbVersion: true,
- setUp: function(mongosConn, dbName, collName) {
- // Expects the collection to exist, and doesn't implicitly create it.
- assert.commandWorked(mongosConn.getDB(dbName).runCommand({create: collName}));
- },
- command: function(dbName, collName) {
- return {planCacheListPlans: collName, query: {_id: "A"}};
- },
- cleanUp: function(mongosConn, dbName, collName) {
- assert(mongosConn.getDB(dbName).getCollection(collName).drop());
- }
- }
- },
- planCacheListQueryShapes: {
- run: {
- sendsDbVersion: true,
- command: function(dbName, collName) {
- return {planCacheListQueryShapes: collName};
- }
- }
- },
planCacheSetFilter: {
run: {
sendsDbVersion: true,
diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js
index b7455ba3de1..0e8f28e12d6 100644
--- a/jstests/sharding/safe_secondary_reads_drop_recreate.js
+++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js
@@ -15,6 +15,15 @@
* *when the the collection has been dropped and recreated as empty.*
* - behavior: Must be one of "unshardedOnly", "targetsPrimaryUsesConnectionVersioning" or
* "versioned". Determines what system profiler checks are performed.
+ *
+ * Tagged as 'requires_fcv_44', since this test cannot run against versions less then 4.4. This is
+ * because 'planCacheListPlans' and 'planCacheListQueryShapes' were deleted in 4.4, and thus not
+ * tested here. But this test asserts that all commands are covered, so will fail against a version
+ * of the server which implements these commands.
+ *
+ * @tags: [
+ * requires_fcv_44,
+ * ]
*/
(function() {
"use strict";
@@ -240,8 +249,6 @@ let testCases = {
planCacheClear: {skip: "does not return user data"},
planCacheClearFilters: {skip: "does not return user data"},
planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
planCacheSetFilter: {skip: "does not return user data"},
profile: {skip: "primary only"},
reapLogicalSessionCacheNow: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
index 96f2efd2978..5b8afea14cc 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js
@@ -20,6 +20,15 @@
* results for the command run with read concern 'available'.
* - behavior: Must be one of "unshardedOnly", "targetsPrimaryUsesConnectionVersioning" or
* "versioned". Determines what system profiler checks are performed.
+ *
+ * Tagged as 'requires_fcv_44', since this test cannot run against versions less then 4.4. This is
+ * because 'planCacheListPlans' and 'planCacheListQueryShapes' were deleted in 4.4, and thus not
+ * tested here. But this test asserts that all commands are covered, so will fail against a version
+ * of the server which implements these commands.
+ *
+ * @tags: [
+ * requires_fcv_44,
+ * ]
*/
(function() {
"use strict";
@@ -275,8 +284,6 @@ let testCases = {
planCacheClear: {skip: "does not return user data"},
planCacheClearFilters: {skip: "does not return user data"},
planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
planCacheSetFilter: {skip: "does not return user data"},
profile: {skip: "primary only"},
reapLogicalSessionCacheNow: {skip: "does not return user data"},
diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
index d310da83d9e..7a2e4278a04 100644
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
+++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js
@@ -15,6 +15,15 @@
* *when the range has been deleted on the donor.*
* - behavior: Must be one of "unshardedOnly", "targetsPrimaryUsesConnectionVersioning" or
* "versioned". Determines what system profiler checks are performed.
+ *
+ * Tagged as 'requires_fcv_44', since this test cannot run against versions less then 4.4. This is
+ * because 'planCacheListPlans' and 'planCacheListQueryShapes' were deleted in 4.4, and thus not
+ * tested here. But this test asserts that all commands are covered, so will fail against a version
+ * of the server which implements these commands.
+ *
+ * @tags: [
+ * requires_fcv_44,
+ * ]
*/
(function() {
"use strict";
@@ -245,8 +254,6 @@ let testCases = {
planCacheClear: {skip: "does not return user data"},
planCacheClearFilters: {skip: "does not return user data"},
planCacheListFilters: {skip: "does not return user data"},
- planCacheListPlans: {skip: "does not return user data"},
- planCacheListQueryShapes: {skip: "does not return user data"},
planCacheSetFilter: {skip: "does not return user data"},
profile: {skip: "primary only"},
reapLogicalSessionCacheNow: {skip: "does not return user data"},
diff --git a/jstests/sharding/track_unsharded_collections_check_shard_version.js b/jstests/sharding/track_unsharded_collections_check_shard_version.js
index 9ff6e225d6b..60ac3fe2eb0 100644
--- a/jstests/sharding/track_unsharded_collections_check_shard_version.js
+++ b/jstests/sharding/track_unsharded_collections_check_shard_version.js
@@ -259,21 +259,6 @@ let testCases = {
return {planCacheListFilters: collName};
},
},
- planCacheListPlans: {
- // Uses connection versioning.
- whenNamespaceDoesNotExistFailsWith: ErrorCodes.BadValue,
- whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
- command: collName => {
- return {planCacheListPlans: collName, query: {_id: "A"}};
- },
- },
- planCacheListQueryShapes: {
- // Uses connection versioning.
- whenNamespaceIsViewFailsWith: ErrorCodes.CommandNotSupportedOnView,
- command: collName => {
- return {planCacheListQueryShapes: collName};
- },
- },
planCacheSetFilter: {
// Uses connection versioning.
whenNamespaceDoesNotExistFailsWith: ErrorCodes.BadValue,
diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript
index f7e219f6a65..38c77097e9f 100644
--- a/src/mongo/db/commands/SConscript
+++ b/src/mongo/db/commands/SConscript
@@ -269,6 +269,7 @@ env.Library(
"list_databases.cpp",
"list_indexes.cpp",
"pipeline_command.cpp",
+ "plan_cache_clear_command.cpp",
"plan_cache_commands.cpp",
"rename_collection_cmd.cpp",
"run_aggregate.cpp",
diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp
index 8ddacaf5872..59dd614002a 100644
--- a/src/mongo/db/commands/index_filter_commands.cpp
+++ b/src/mongo/db/commands/index_filter_commands.cpp
@@ -253,7 +253,7 @@ Status ClearFilters::clear(OperationContext* opCtx,
// - clear hints for single query shape when a query shape is described in the
// command arguments.
if (cmdObj.hasField("query")) {
- auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
+ auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
@@ -385,7 +385,7 @@ Status SetFilter::set(OperationContext* opCtx,
}
}
- auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
+ auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, ns, cmdObj);
if (!statusWithCQ.isOK()) {
return statusWithCQ.getStatus();
}
diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp
new file mode 100644
index 00000000000..5eb139aeddf
--- /dev/null
+++ b/src/mongo/db/commands/plan_cache_clear_command.cpp
@@ -0,0 +1,183 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
+
+#include "mongo/platform/basic.h"
+
+#include <string>
+
+#include "mongo/base/status.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/commands/plan_cache_commands.h"
+#include "mongo/db/db_raii.h"
+#include "mongo/db/matcher/extensions_callback_real.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/collection_query_info.h"
+#include "mongo/db/query/plan_ranker.h"
+#include "mongo/util/log.h"
+
+namespace mongo {
+namespace {
+
+PlanCache* getPlanCache(OperationContext* opCtx, Collection* collection) {
+ invariant(collection);
+ PlanCache* planCache = CollectionQueryInfo::get(collection).getPlanCache();
+ invariant(planCache);
+ return planCache;
+}
+
+/**
+ * Clears collection's plan cache. If query shape is provided, clears plans for that single query
+ * shape only.
+ */
+Status clear(OperationContext* opCtx,
+ PlanCache* planCache,
+ const std::string& ns,
+ const BSONObj& cmdObj) {
+ invariant(planCache);
+
+ // According to the specification, the planCacheClear command runs in two modes:
+ // - clear all query shapes; or
+ // - clear plans for single query shape when a query shape is described in the
+ // command arguments.
+ if (cmdObj.hasField("query")) {
+ auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, ns, cmdObj);
+ if (!statusWithCQ.isOK()) {
+ return statusWithCQ.getStatus();
+ }
+
+ auto cq = std::move(statusWithCQ.getValue());
+
+ Status result = planCache->remove(*cq);
+ if (!result.isOK()) {
+ invariant(result.code() == ErrorCodes::NoSuchKey);
+ LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
+ << redact(cq->getQueryObj()) << "(sort: " << cq->getQueryRequest().getSort()
+ << "; projection: " << cq->getQueryRequest().getProj()
+ << "; collation: " << cq->getQueryRequest().getCollation() << ")";
+ return Status::OK();
+ }
+
+ LOG(1) << ns << ": removed plan cache entry - " << redact(cq->getQueryObj())
+ << "(sort: " << cq->getQueryRequest().getSort()
+ << "; projection: " << cq->getQueryRequest().getProj()
+ << "; collation: " << cq->getQueryRequest().getCollation() << ")";
+
+ return Status::OK();
+ }
+
+ // If query is not provided, make sure sort, projection, and collation are not in arguments.
+ // We do not want to clear the entire cache inadvertently when the user
+ // forgets to provide a value for "query".
+ if (cmdObj.hasField("sort") || cmdObj.hasField("projection") || cmdObj.hasField("collation")) {
+ return Status(ErrorCodes::BadValue,
+ "sort, projection, or collation provided without query");
+ }
+
+ planCache->clear();
+
+ LOG(1) << ns << ": cleared plan cache";
+
+ return Status::OK();
+}
+
+} // namespace
+
+/**
+ * The 'planCacheClear' command can be used to clear all entries from a collection's plan cache, or
+ * to delete a particular plan cache entry. In the latter case, the plan cache entry to delete is
+ * specified with an example query, like so:
+ *
+ * {
+ * planCacheClear: <collection>,
+ * query: <query>,
+ * sort: <sort>,
+ * projection: <projection>
+ * }
+ */
+class PlanCacheClearCommand final : public BasicCommand {
+public:
+ PlanCacheClearCommand() : BasicCommand("planCacheClear") {}
+
+ bool run(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) override;
+
+ bool supportsWriteConcern(const BSONObj& cmd) const override {
+ return false;
+ }
+
+ AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+ return AllowedOnSecondary::kOptIn;
+ }
+
+ Status checkAuthForCommand(Client* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) const override;
+
+ std::string help() const override {
+ return "Drops one or all plan cache entries in a collection.";
+ }
+} planCacheClearCommand;
+
+Status PlanCacheClearCommand::checkAuthForCommand(Client* client,
+ const std::string& dbname,
+ const BSONObj& cmdObj) const {
+ AuthorizationSession* authzSession = AuthorizationSession::get(client);
+ ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
+
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::planCacheWrite)) {
+ return Status::OK();
+ }
+
+ return Status(ErrorCodes::Unauthorized, "unauthorized");
+}
+
+bool PlanCacheClearCommand::run(OperationContext* opCtx,
+ const std::string& dbname,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) {
+ const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
+
+ // This is a read lock. The query cache is owned by the collection.
+ AutoGetCollectionForReadCommand ctx(opCtx, nss);
+ if (!ctx.getCollection()) {
+ // Clearing a non-existent collection always succeeds.
+ return true;
+ }
+
+ auto planCache = getPlanCache(opCtx, ctx.getCollection());
+ uassertStatusOK(clear(opCtx, planCache, nss.ns(), cmdObj));
+ return true;
+}
+
+} // namespace mongo
diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp
index e52daa8d91a..0bafc8533b3 100644
--- a/src/mongo/db/commands/plan_cache_commands.cpp
+++ b/src/mongo/db/commands/plan_cache_commands.cpp
@@ -27,129 +27,17 @@
* it in the license file.
*/
-#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kCommand
-
#include "mongo/platform/basic.h"
-#include <sstream>
-#include <string>
-
-#include "mongo/base/init.h"
-#include "mongo/base/status.h"
-#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/collection.h"
-#include "mongo/db/catalog/database.h"
-#include "mongo/db/client.h"
#include "mongo/db/commands/plan_cache_commands.h"
-#include "mongo/db/db_raii.h"
-#include "mongo/db/jsobj.h"
-#include "mongo/db/matcher/extensions_callback_real.h"
-#include "mongo/db/namespace_string.h"
-#include "mongo/db/query/collection_query_info.h"
-#include "mongo/db/query/explain.h"
-#include "mongo/db/query/plan_ranker.h"
-#include "mongo/util/hex.h"
-#include "mongo/util/log.h"
-
-namespace {
-
-using std::string;
-using std::unique_ptr;
-using namespace mongo;
-
-
-/**
- * Retrieves a collection's plan cache from the database.
- */
-static Status getPlanCache(OperationContext* opCtx,
- Collection* collection,
- const string& ns,
- PlanCache** planCacheOut) {
- *planCacheOut = nullptr;
-
- if (nullptr == collection) {
- return Status(ErrorCodes::BadValue, "no such collection");
- }
-
- PlanCache* planCache = CollectionQueryInfo::get(collection).getPlanCache();
- invariant(planCache);
-
- *planCacheOut = planCache;
- return Status::OK();
-}
-
-//
-// Command instances.
-// Registers commands with the command system and make commands
-// available to the client.
-//
-
-MONGO_INITIALIZER_WITH_PREREQUISITES(SetupPlanCacheCommands, MONGO_NO_PREREQUISITES)
-(InitializerContext* context) {
- // PlanCacheCommand constructors refer to static ActionType instances.
- // Registering commands in a mongo static initializer ensures that
- // the ActionType construction will be completed first.
- new PlanCacheListQueryShapesDeprecated();
- new PlanCacheClear();
- new PlanCacheListPlansDeprecated();
-
- return Status::OK();
-}
-
-} // namespace
-
-namespace mongo {
-
-using std::string;
-using std::stringstream;
-using std::unique_ptr;
-using std::vector;
-
-PlanCacheCommand::PlanCacheCommand(const string& name,
- const string& helpText,
- ActionType actionType)
- : BasicCommand(name), helpText(helpText), actionType(actionType) {}
-
-bool PlanCacheCommand::run(OperationContext* opCtx,
- const string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
- const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbname, cmdObj));
- Status status = runPlanCacheCommand(opCtx, nss.ns(), cmdObj, &result);
- uassertStatusOK(status);
- return true;
-}
-
-bool PlanCacheCommand::supportsWriteConcern(const BSONObj& cmd) const {
- return false;
-}
-
-Command::AllowedOnSecondary PlanCacheCommand::secondaryAllowed(ServiceContext*) const {
- return AllowedOnSecondary::kOptIn;
-}
-
-std::string PlanCacheCommand::help() const {
- return helpText;
-}
-
-Status PlanCacheCommand::checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const {
- AuthorizationSession* authzSession = AuthorizationSession::get(client);
- ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
-
- if (authzSession->isAuthorizedForActionsOnResource(pattern, actionType)) {
- return Status::OK();
- }
+#include "mongo/db/matcher/extensions_callback_real.h"
- return Status(ErrorCodes::Unauthorized, "unauthorized");
-}
+namespace mongo::plan_cache_commands {
-// static
-StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationContext* opCtx,
- const string& ns,
- const BSONObj& cmdObj) {
+StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* opCtx,
+ StringData ns,
+ const BSONObj& cmdObj) {
// query - required
BSONElement queryElt = cmdObj.getField("query");
if (queryElt.eoo()) {
@@ -218,254 +106,4 @@ StatusWith<unique_ptr<CanonicalQuery>> PlanCacheCommand::canonicalize(OperationC
return std::move(statusWithCQ.getValue());
}
-PlanCacheListQueryShapesDeprecated::PlanCacheListQueryShapesDeprecated()
- : PlanCacheCommand("planCacheListQueryShapes",
- "Deprecated. Prefer the $planCacheStats aggregation pipeline stage.",
- ActionType::planCacheRead) {}
-
-Status PlanCacheListQueryShapesDeprecated::runPlanCacheCommand(OperationContext* opCtx,
- const string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- if (_sampler.tick()) {
- warning()
- << "The planCacheListQueryShapes command is deprecated. Prefer the $planCacheStats "
- "aggregation pipeline stage.";
- }
-
- // This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
-
- PlanCache* planCache;
- Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - return results with empty shapes array.
- BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
- arrayBuilder.doneFast();
- return Status::OK();
- }
- return list(*planCache, bob);
-}
-
-// static
-Status PlanCacheListQueryShapesDeprecated::list(const PlanCache& planCache, BSONObjBuilder* bob) {
- invariant(bob);
-
- // Fetch all cached solutions from plan cache.
- auto entries = planCache.getAllEntries();
-
- BSONArrayBuilder arrayBuilder(bob->subarrayStart("shapes"));
- for (auto&& entry : entries) {
- invariant(entry);
-
- BSONObjBuilder shapeBuilder(arrayBuilder.subobjStart());
- shapeBuilder.append("query", entry->query);
- shapeBuilder.append("sort", entry->sort);
- shapeBuilder.append("projection", entry->projection);
- if (!entry->collation.isEmpty()) {
- shapeBuilder.append("collation", entry->collation);
- }
- shapeBuilder.append("queryHash", unsignedIntToFixedLengthHex(entry->queryHash));
- shapeBuilder.doneFast();
- }
- arrayBuilder.doneFast();
-
- return Status::OK();
-}
-
-PlanCacheClear::PlanCacheClear()
- : PlanCacheCommand("planCacheClear",
- "Drops one or all cached queries in a collection.",
- ActionType::planCacheWrite) {}
-
-Status PlanCacheClear::runPlanCacheCommand(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- // This is a read lock. The query cache is owned by the collection.
- AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
-
- PlanCache* planCache;
- Status status = getPlanCache(opCtx, ctx.getCollection(), ns, &planCache);
- if (!status.isOK()) {
- // No collection - nothing to do. Return OK status.
- return Status::OK();
- }
- return clear(opCtx, planCache, ns, cmdObj);
-}
-
-// static
-Status PlanCacheClear::clear(OperationContext* opCtx,
- PlanCache* planCache,
- const string& ns,
- const BSONObj& cmdObj) {
- invariant(planCache);
-
- // According to the specification, the planCacheClear command runs in two modes:
- // - clear all query shapes; or
- // - clear plans for single query shape when a query shape is described in the
- // command arguments.
- if (cmdObj.hasField("query")) {
- auto statusWithCQ = PlanCacheCommand::canonicalize(opCtx, ns, cmdObj);
- if (!statusWithCQ.isOK()) {
- return statusWithCQ.getStatus();
- }
-
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
-
- Status result = planCache->remove(*cq);
- if (!result.isOK()) {
- invariant(result.code() == ErrorCodes::NoSuchKey);
- LOG(1) << ns << ": query shape doesn't exist in PlanCache - "
- << redact(cq->getQueryObj()) << "(sort: " << cq->getQueryRequest().getSort()
- << "; projection: " << cq->getQueryRequest().getProj()
- << "; collation: " << cq->getQueryRequest().getCollation() << ")";
- return Status::OK();
- }
-
- LOG(1) << ns << ": removed plan cache entry - " << redact(cq->getQueryObj())
- << "(sort: " << cq->getQueryRequest().getSort()
- << "; projection: " << cq->getQueryRequest().getProj()
- << "; collation: " << cq->getQueryRequest().getCollation() << ")";
-
- return Status::OK();
- }
-
- // If query is not provided, make sure sort, projection, and collation are not in arguments.
- // We do not want to clear the entire cache inadvertently when the user
- // forgets to provide a value for "query".
- if (cmdObj.hasField("sort") || cmdObj.hasField("projection") || cmdObj.hasField("collation")) {
- return Status(ErrorCodes::BadValue,
- "sort, projection, or collation provided without query");
- }
-
- planCache->clear();
-
- LOG(1) << ns << ": cleared plan cache";
-
- return Status::OK();
-}
-
-PlanCacheListPlansDeprecated::PlanCacheListPlansDeprecated()
- : PlanCacheCommand("planCacheListPlans",
- "Deprecated. Prefer the $planCacheStats aggregation pipeline stage.",
- ActionType::planCacheRead) {}
-
-Status PlanCacheListPlansDeprecated::runPlanCacheCommand(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- if (_sampler.tick()) {
- warning() << "The planCacheListPlans command is deprecated. Prefer the $planCacheStats "
- "aggregation pipeline stage.";
- }
-
- AutoGetCollectionForReadCommand ctx(opCtx, NamespaceString(ns));
-
- PlanCache* planCache;
- uassertStatusOK(getPlanCache(opCtx, ctx.getCollection(), ns, &planCache));
- return list(opCtx, *planCache, ns, cmdObj, bob);
-}
-
-namespace {
-Status listPlansOriginalFormat(std::unique_ptr<CanonicalQuery> cq,
- const PlanCache& planCache,
- BSONObjBuilder* bob) {
- auto lookupResult = planCache.getEntry(*cq);
- if (lookupResult == ErrorCodes::NoSuchKey) {
- // Return empty plans in results if query shape does not
- // exist in plan cache.
- BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
- plansBuilder.doneFast();
- return Status::OK();
- } else if (!lookupResult.isOK()) {
- return lookupResult.getStatus();
- }
-
- auto entry = std::move(lookupResult.getValue());
-
- BSONArrayBuilder plansBuilder(bob->subarrayStart("plans"));
-
- size_t numPlans = entry->plannerData.size();
- invariant(numPlans == entry->decision->stats.size());
- invariant(numPlans ==
- entry->decision->scores.size() + entry->decision->failedCandidates.size());
- invariant(entry->decision->candidateOrder.size() == entry->decision->scores.size());
- for (size_t i = 0; i < numPlans; ++i) {
- BSONObjBuilder planBob(plansBuilder.subobjStart());
-
- // Create the plan details field. Currently, this is a simple string representation of
- // SolutionCacheData.
- BSONObjBuilder detailsBob(planBob.subobjStart("details"));
- detailsBob.append("solution", entry->plannerData[i]->toString());
- detailsBob.doneFast();
-
- // reason is comprised of score and initial stats provided by
- // multi plan runner.
- BSONObjBuilder reasonBob(planBob.subobjStart("reason"));
- if (i < entry->decision->candidateOrder.size()) {
- reasonBob.append("score", entry->decision->scores[i]);
- } else {
- reasonBob.append("score", 0.0);
- reasonBob.append("failed", true);
- }
- BSONObjBuilder statsBob(reasonBob.subobjStart("stats"));
- PlanStageStats* stats = entry->decision->stats[i].get();
- if (stats) {
- Explain::statsToBSON(*stats, &statsBob);
- }
- statsBob.doneFast();
- reasonBob.doneFast();
-
- // BSON object for 'feedback' field shows scores from historical executions of the plan.
- BSONObjBuilder feedbackBob(planBob.subobjStart("feedback"));
- if (i == 0U) {
- feedbackBob.append("nfeedback", int(entry->feedback.size()));
- BSONArrayBuilder scoresBob(feedbackBob.subarrayStart("scores"));
- for (size_t i = 0; i < entry->feedback.size(); ++i) {
- BSONObjBuilder scoreBob(scoresBob.subobjStart());
- scoreBob.append("score", entry->feedback[i]);
- }
- scoresBob.doneFast();
- }
- feedbackBob.doneFast();
-
- planBob.append("filterSet", entry->plannerData[i]->indexFilterApplied);
- }
-
- plansBuilder.doneFast();
-
- // Append the time the entry was inserted into the plan cache.
- bob->append("timeOfCreation", entry->timeOfCreation);
- bob->append("queryHash", unsignedIntToFixedLengthHex(entry->queryHash));
- bob->append("planCacheKey", unsignedIntToFixedLengthHex(entry->planCacheKey));
- // Append whether or not the entry is active.
- bob->append("isActive", entry->isActive);
- bob->append("works", static_cast<long long>(entry->works));
- return Status::OK();
-}
-} // namespace
-
-// static
-Status PlanCacheListPlansDeprecated::list(OperationContext* opCtx,
- const PlanCache& planCache,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) {
- auto statusWithCQ = canonicalize(opCtx, ns, cmdObj);
- if (!statusWithCQ.isOK()) {
- return statusWithCQ.getStatus();
- }
-
- if (!internalQueryCacheListPlansNewOutput.load())
- return listPlansOriginalFormat(std::move(statusWithCQ.getValue()), planCache, bob);
-
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto entry = uassertStatusOK(planCache.getEntry(*cq));
-
- // internalQueryCacheDisableInactiveEntries is True and we should use the new output format.
- Explain::planCacheEntryToBSON(*entry, bob);
- return Status::OK();
-}
-
-} // namespace mongo
+} // namespace mongo::plan_cache_commands
diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h
index e800258be1b..765e6d254ca 100644
--- a/src/mongo/db/commands/plan_cache_commands.h
+++ b/src/mongo/db/commands/plan_cache_commands.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2018-present MongoDB, Inc.
+ * Copyright (C) 2019-present MongoDB, Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the Server Side Public License, version 1,
@@ -29,166 +29,18 @@
#pragma once
-#include "mongo/db/commands.h"
-#include "mongo/db/query/plan_cache.h"
-#include "mongo/util/debug_util.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/query/canonical_query.h"
-namespace mongo {
+namespace mongo::plan_cache_commands {
/**
- * DB commands for plan cache.
- * These are in a header to facilitate unit testing. See plan_cache_commands_test.cpp.
+ * Parses the plan cache command specified by 'ns' and 'cmdObj' and returns the query shape inside
+ * that command represented as a CanonicalQuery.
*/
+StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* opCtx,
+ StringData ns,
+ const BSONObj& cmdObj);
-/**
- * PlanCacheCommand
- * Defines common attributes for all plan cache related commands
- * such as slaveOk.
- */
-class PlanCacheCommand : public BasicCommand {
-public:
- PlanCacheCommand(const std::string& name, const std::string& helpText, ActionType actionType);
-
- /**
- * Entry point from command subsystem.
- * Implementation provides standardization of error handling
- * such as adding error code and message to BSON result.
- *
- * Do not override in derived classes.
- * Override runPlanCacheCommands instead to
- * implement plan cache command functionality.
- */
-
- bool run(OperationContext* opCtx,
- const std::string& dbname,
- const BSONObj& cmdObj,
- BSONObjBuilder& result);
-
- virtual bool supportsWriteConcern(const BSONObj& cmd) const override;
-
- AllowedOnSecondary secondaryAllowed(ServiceContext*) const override;
-
- std::string help() const override;
-
- /**
- * Two action types defined for plan cache commands:
- * - planCacheRead
- * - planCacheWrite
- */
- virtual Status checkAuthForCommand(Client* client,
- const std::string& dbname,
- const BSONObj& cmdObj) const;
- /**
- * Subset of command arguments used by plan cache commands
- * Override to provide command functionality.
- * Should contain just enough logic to invoke run*Command() function
- * in plan_cache.h
- */
- virtual Status runPlanCacheCommand(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob) = 0;
-
- /**
- * Validatess query shape from command object and returns canonical query.
- */
- static StatusWith<std::unique_ptr<CanonicalQuery>> canonicalize(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj);
-
-private:
- std::string helpText;
- ActionType actionType;
-};
-
-/**
- * DEPRECATED. Clients should prefer the $planCacheStats aggregation metadata source.
- *
- * planCacheListQueryShapes
- *
- * { planCacheListQueryShapes: <collection> }
- *
- */
-class PlanCacheListQueryShapesDeprecated : public PlanCacheCommand {
-public:
- PlanCacheListQueryShapesDeprecated();
- virtual Status runPlanCacheCommand(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Looks up cache keys for collection's plan cache. Inserts keys for query into BSON builder.
- */
- static Status list(const PlanCache& planCache, BSONObjBuilder* bob);
-
-private:
- // Used to log occasional deprecation warnings when this command is invoked.
- Rarely _sampler;
-};
-
-/**
- * planCacheClear
- *
- * {
- * planCacheClear: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>
- * }
- *
- */
-class PlanCacheClear : public PlanCacheCommand {
-public:
- PlanCacheClear();
- virtual Status runPlanCacheCommand(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Clears collection's plan cache.
- * If query shape is provided, clears plans for that single query shape only.
- */
- static Status clear(OperationContext* opCtx,
- PlanCache* planCache,
- const std::string& ns,
- const BSONObj& cmdObj);
-};
-
-/**
- * DEPRECATED. Clients should prefer the $planCacheStats aggregation metadata source.
- *
- * planCacheListPlans
- *
- * {
- * planCacheListPlans: <collection>,
- * query: <query>,
- * sort: <sort>,
- * projection: <projection>
- * }
- *
- */
-class PlanCacheListPlansDeprecated : public PlanCacheCommand {
-public:
- PlanCacheListPlansDeprecated();
- virtual Status runPlanCacheCommand(OperationContext* opCtx,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
- /**
- * Displays the cached plans for a query shape.
- */
- static Status list(OperationContext* opCtx,
- const PlanCache& planCache,
- const std::string& ns,
- const BSONObj& cmdObj,
- BSONObjBuilder* bob);
-
-private:
- // Used to log occasional deprecation warnings when this command is invoked.
- Rarely _sampler;
-};
-
-} // namespace mongo
+} // namespace mongo::plan_cache_commands
diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp
index 291bc64eb9b..e6e10d38632 100644
--- a/src/mongo/db/commands/plan_cache_commands_test.cpp
+++ b/src/mongo/db/commands/plan_cache_commands_test.cpp
@@ -27,679 +27,134 @@
* it in the license file.
*/
-/**
- * This file contains tests for mongo/db/commands/plan_cache_commands.h
- */
-
#include "mongo/db/commands/plan_cache_commands.h"
-
-#include <algorithm>
-#include <memory>
-
-#include "mongo/db/json.h"
-#include "mongo/db/operation_context_noop.h"
-#include "mongo/db/query/plan_ranker.h"
-#include "mongo/db/query/query_solution.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/query/plan_cache.h"
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/unittest/unittest.h"
-#include "mongo/util/str.h"
-
-using namespace mongo;
+namespace mongo {
namespace {
-using std::string;
-using std::unique_ptr;
-using std::vector;
-
-static const NamespaceString nss("test.collection");
-
-/**
- * Tests for planCacheListQueryShapes
- */
-
-/**
- * Utility function to get list of keys in the cache.
- */
-std::vector<BSONObj> getShapes(const PlanCache& planCache) {
- BSONObjBuilder bob;
- ASSERT_OK(PlanCacheListQueryShapesDeprecated::list(planCache, &bob));
- BSONObj resultObj = bob.obj();
- BSONElement shapesElt = resultObj.getField("shapes");
- ASSERT_EQUALS(shapesElt.type(), mongo::Array);
- vector<BSONElement> shapesEltArray = shapesElt.Array();
- vector<BSONObj> shapes;
- for (vector<BSONElement>::const_iterator i = shapesEltArray.begin(); i != shapesEltArray.end();
- ++i) {
- const BSONElement& elt = *i;
-
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
-
- // Check required fields.
- // query
- BSONElement queryElt = obj.getField("query");
- ASSERT_TRUE(queryElt.isABSONObj());
-
- // sort
- BSONElement sortElt = obj.getField("sort");
- ASSERT_TRUE(sortElt.isABSONObj());
-
- // projection
- BSONElement projectionElt = obj.getField("projection");
- ASSERT_TRUE(projectionElt.isABSONObj());
-
- // collation
- BSONElement collationElt = obj.getField("collation");
- if (!collationElt.eoo()) {
- ASSERT_TRUE(collationElt.isABSONObj());
- }
-
- // All fields OK. Append to vector.
- shapes.push_back(obj.getOwned());
- }
- return shapes;
-}
-
-/**
- * Utility function to create a SolutionCacheData
- */
-SolutionCacheData* createSolutionCacheData() {
- unique_ptr<SolutionCacheData> scd(new SolutionCacheData());
- scd->tree.reset(new PlanCacheIndexTree());
- return scd.release();
-}
-
-/**
- * Utility function to create a PlanRankingDecision
- */
-std::unique_ptr<PlanRankingDecision> createDecision(size_t numPlans, size_t works = 0) {
- unique_ptr<PlanRankingDecision> why(new PlanRankingDecision());
- for (size_t i = 0; i < numPlans; ++i) {
- CommonStats common("COLLSCAN");
- auto stats = std::make_unique<PlanStageStats>(common, STAGE_COLLSCAN);
- stats->specific.reset(new CollectionScanStats());
- why->stats.push_back(std::move(stats));
- why->stats[i]->common.works = works;
- why->scores.push_back(0U);
- why->candidateOrder.push_back(i);
- }
- return why;
-}
+static const NamespaceString nss{"test.collection"_sd};
-TEST(PlanCacheCommandsTest, planCacheListQueryShapesEmpty) {
- PlanCache empty;
- vector<BSONObj> shapes = getShapes(empty);
- ASSERT_TRUE(shapes.empty());
-}
-
-TEST(PlanCacheCommandsTest, planCacheListQueryShapesOneKey) {
+TEST(PlanCacheCommandsTest, CannotCanonicalizeWithMissingQueryField) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Create a canonical query
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 1}"));
- qr->setSort(fromjson("{a: -1}"));
- qr->setProj(fromjson("{_id: 0}"));
- qr->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cq,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
-
- vector<BSONObj> shapes = getShapes(planCache);
- ASSERT_EQUALS(shapes.size(), 1U);
- ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("query"), cq->getQueryObj());
- ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("sort"), cq->getQueryRequest().getSort());
- ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("projection"), cq->getQueryRequest().getProj());
- ASSERT_BSONOBJ_EQ(shapes[0].getObjectField("collation"), cq->getCollator()->getSpec().toBSON());
+ ASSERT_NOT_OK(
+ plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{}")).getStatus());
}
-/**
- * Tests for planCacheClear
- */
-
-TEST(PlanCacheCommandsTest, planCacheClearAllShapes) {
+TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenQueryFieldIsNotObject) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Create a canonical query
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 1}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
-
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cq,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
- ASSERT_EQUALS(getShapes(planCache).size(), 1U);
-
- // Clear cache and confirm number of keys afterwards.
- ASSERT_OK(PlanCacheClear::clear(opCtx.get(), &planCache, nss.ns(), BSONObj()));
- ASSERT_EQUALS(getShapes(planCache).size(), 0U);
+ ASSERT_NOT_OK(plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: 1}"))
+ .getStatus());
}
-/**
- * Tests for PlanCacheCommand::makeCacheKey
- * Mostly validation on the input parameters
- */
-
-TEST(PlanCacheCommandsTest, Canonicalize) {
- // Invalid parameters
- PlanCache planCache;
+TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenSortFieldIsNotObject) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Missing query field
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{}")).getStatus());
- // Query needs to be an object
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: 1}")).getStatus());
- // Sort needs to be an object
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
- .getStatus());
- // Projection needs to be an object.
- ASSERT_NOT_OK(PlanCacheCommand::canonicalize(
- opCtx.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
- .getStatus());
- // Collation needs to be an object.
- ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
- .getStatus());
- // Bad query (invalid sort order)
ASSERT_NOT_OK(
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: 1}"))
.getStatus());
-
- // Valid parameters
- auto statusWithCQ =
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
-
- // Equivalent query should generate same key.
- statusWithCQ =
- PlanCacheCommand::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 1, a: 1}}"));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
- ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
-
- // Sort query should generate different key from unsorted query.
- statusWithCQ = PlanCacheCommand::canonicalize(
- opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> sortQuery1 = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery1));
-
- // Confirm sort arguments are properly delimited (SERVER-17158)
- statusWithCQ = PlanCacheCommand::canonicalize(
- opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
-
- // Changing order and/or value of predicates should not change key
- statusWithCQ = PlanCacheCommand::canonicalize(
- opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 3}, sort: {a: 1, b: 1}}"));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> sortQuery3 = std::move(statusWithCQ.getValue());
- ASSERT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery3));
-
- // Projected query should generate different key from unprojected query.
- statusWithCQ = PlanCacheCommand::canonicalize(
- opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
- ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
-}
-
-/**
- * Tests for planCacheClear (single query shape)
- */
-
-TEST(PlanCacheCommandsTest, planCacheClearInvalidParameter) {
- PlanCache planCache;
- OperationContextNoop opCtx;
-
- // Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: 12345}")));
- ASSERT_NOT_OK(
- PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: /keyisnotregex/}")));
- // Query must pass canonicalization.
- ASSERT_NOT_OK(PlanCacheClear::clear(
- &opCtx, &planCache, nss.ns(), fromjson("{query: {a: {$no_such_op: 1}}}")));
- // Sort present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{sort: {a: 1}}")));
- // Projection present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(
- &opCtx, &planCache, nss.ns(), fromjson("{projection: {_id: 0, a: 1}}")));
- // Collation present without query is an error.
- ASSERT_NOT_OK(PlanCacheClear::clear(
- &opCtx, &planCache, nss.ns(), fromjson("{collation: {locale: 'en_US'}}")));
}
-TEST(PlanCacheCommandsTest, planCacheClearUnknownKey) {
- PlanCache planCache;
- OperationContextNoop opCtx;
-
- ASSERT_OK(PlanCacheClear::clear(&opCtx, &planCache, nss.ns(), fromjson("{query: {a: 1}}")));
-}
-
-TEST(PlanCacheCommandsTest, planCacheClearOneKey) {
+TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenProjectionFieldIsNotObject) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Create 2 canonical queries.
- auto qrA = std::make_unique<QueryRequest>(nss);
- qrA->setFilter(fromjson("{a: 1}"));
- auto statusWithCQA = CanonicalQuery::canonicalize(opCtx.get(), std::move(qrA));
- ASSERT_OK(statusWithCQA.getStatus());
- auto qrB = std::make_unique<QueryRequest>(nss);
- qrB->setFilter(fromjson("{b: 1}"));
- unique_ptr<CanonicalQuery> cqA = std::move(statusWithCQA.getValue());
- auto statusWithCQB = CanonicalQuery::canonicalize(opCtx.get(), std::move(qrB));
- ASSERT_OK(statusWithCQB.getStatus());
- unique_ptr<CanonicalQuery> cqB = std::move(statusWithCQB.getValue());
-
- // Create plan cache with 2 entries.
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cqA,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
- ASSERT_OK(planCache.set(*cqB,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
-
- // Check keys in cache before dropping {b: 1}
- vector<BSONObj> shapesBefore = getShapes(planCache);
- ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shapeA =
- BSON("query" << cqA->getQueryObj() << "sort" << cqA->getQueryRequest().getSort()
- << "projection" << cqA->getQueryRequest().getProj());
- BSONObj shapeB =
- BSON("query" << cqB->getQueryObj() << "sort" << cqB->getQueryRequest().getSort()
- << "projection" << cqB->getQueryRequest().getProj());
- ASSERT_TRUE(
- std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeA](const BSONObj& obj) {
- auto filteredObj = obj.removeField("queryHash");
- return SimpleBSONObjComparator::kInstance.evaluate(shapeA == filteredObj);
- }) != shapesBefore.end());
- ASSERT_TRUE(
- std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shapeB](const BSONObj& obj) {
- auto filteredObj = obj.removeField("queryHash");
- return SimpleBSONObjComparator::kInstance.evaluate(shapeB == filteredObj);
- }) != shapesBefore.end());
-
- // Drop {b: 1} from cache. Make sure {a: 1} is still in cache afterwards.
- BSONObjBuilder bob;
-
- ASSERT_OK(PlanCacheClear::clear(
- opCtx.get(), &planCache, nss.ns(), BSON("query" << cqB->getQueryObj())));
- vector<BSONObj> shapesAfter = getShapes(planCache);
- ASSERT_EQUALS(shapesAfter.size(), 1U);
- auto filteredShape0 = shapesAfter[0].removeField("queryHash");
- ASSERT_BSONOBJ_EQ(filteredShape0, shapeA);
+ ASSERT_NOT_OK(plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {}, projection: 1}"))
+ .getStatus());
}
-TEST(PlanCacheCommandsTest, planCacheClearOneKeyCollation) {
+TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenCollationFieldIsNotObject) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Create 2 canonical queries, one with collation.
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 'foo'}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
- ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto qrCollation = std::make_unique<QueryRequest>(nss);
- qrCollation->setFilter(fromjson("{a: 'foo'}"));
- qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
- auto statusWithCQCollation = CanonicalQuery::canonicalize(opCtx.get(), std::move(qrCollation));
- ASSERT_OK(statusWithCQCollation.getStatus());
- unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
-
- // Create plan cache with 2 entries. Add an index so that indexability is included in the plan
- // cache keys.
- PlanCache planCache;
- const auto keyPattern = fromjson("{a: 1}");
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{"indexName"})}); // name
-
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cq,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
- ASSERT_OK(planCache.set(*cqCollation,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
-
- // Check keys in cache before dropping the query with collation.
- vector<BSONObj> shapesBefore = getShapes(planCache);
- ASSERT_EQUALS(shapesBefore.size(), 2U);
- BSONObj shape = BSON("query" << cq->getQueryObj() << "sort" << cq->getQueryRequest().getSort()
- << "projection" << cq->getQueryRequest().getProj());
- BSONObj shapeWithCollation = BSON(
- "query" << cqCollation->getQueryObj() << "sort" << cqCollation->getQueryRequest().getSort()
- << "projection" << cqCollation->getQueryRequest().getProj() << "collation"
- << cqCollation->getCollator()->getSpec().toBSON());
- ASSERT_TRUE(
- std::find_if(shapesBefore.begin(), shapesBefore.end(), [&shape](const BSONObj& obj) {
- auto filteredObj = obj.removeField("queryHash");
- return SimpleBSONObjComparator::kInstance.evaluate(shape == filteredObj);
- }) != shapesBefore.end());
- ASSERT_TRUE(std::find_if(shapesBefore.begin(),
- shapesBefore.end(),
- [&shapeWithCollation](const BSONObj& obj) {
- auto filteredObj = obj.removeField("queryHash");
- return SimpleBSONObjComparator::kInstance.evaluate(
- shapeWithCollation == filteredObj);
- }) != shapesBefore.end());
-
- // Drop query with collation from cache. Make other query is still in cache afterwards.
- BSONObjBuilder bob;
-
- ASSERT_OK(PlanCacheClear::clear(opCtx.get(), &planCache, nss.ns(), shapeWithCollation));
- vector<BSONObj> shapesAfter = getShapes(planCache);
- ASSERT_EQUALS(shapesAfter.size(), 1U);
- auto filteredShape0 = shapesAfter[0].removeField("queryHash");
- ASSERT_BSONOBJ_EQ(filteredShape0, shape);
-}
-
-/**
- * Tests for planCacheListPlans
- */
-
-/**
- * Function to extract plan ID from BSON element.
- * Validates planID during extraction.
- * Each BSON element contains an embedded BSON object with the following layout:
- * {
- * plan: <plan_id>,
- * details: <plan_details>,
- * reason: <ranking_stats>,
- * feedback: <execution_stats>,
- * source: <source>
- * }
- * Compilation note: GCC 4.4 has issues with getPlan() declared as a function object.
- */
-BSONObj getPlan(const BSONElement& elt) {
- ASSERT_TRUE(elt.isABSONObj());
- BSONObj obj = elt.Obj();
-
- // Check required fields.
- // details
- BSONElement detailsElt = obj.getField("details");
- ASSERT_TRUE(detailsElt.isABSONObj());
-
- // reason
- BSONElement reasonElt = obj.getField("reason");
- ASSERT_TRUE(reasonElt.isABSONObj());
-
- // feedback
- BSONElement feedbackElt = obj.getField("feedback");
- ASSERT_TRUE(feedbackElt.isABSONObj());
-
- return obj.getOwned();
+ ASSERT_NOT_OK(plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {}, collation: 1}"))
+ .getStatus());
}
-BSONObj getCmdResult(const PlanCache& planCache,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& projection,
- const BSONObj& collation) {
-
+TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenSortObjectIsMalformed) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- BSONObjBuilder bob;
- BSONObjBuilder cmdObjBuilder;
- cmdObjBuilder.append("query", query);
- cmdObjBuilder.append("sort", sort);
- cmdObjBuilder.append("projection", projection);
- if (!collation.isEmpty()) {
- cmdObjBuilder.append("collation", collation);
- }
- BSONObj cmdObj = cmdObjBuilder.obj();
- ASSERT_OK(PlanCacheListPlansDeprecated::list(opCtx.get(), planCache, nss.ns(), cmdObj, &bob));
- BSONObj resultObj = bob.obj();
-
- return resultObj;
-}
-
-/**
- * Utility function to get list of plan IDs for a query in the cache.
- */
-vector<BSONObj> getPlans(const PlanCache& planCache,
- const BSONObj& query,
- const BSONObj& sort,
- const BSONObj& projection,
- const BSONObj& collation) {
- BSONObj resultObj = getCmdResult(planCache, query, sort, projection, collation);
- ASSERT_TRUE(resultObj.hasField("isActive"));
- ASSERT_TRUE(resultObj.hasField("works"));
-
- BSONElement plansElt = resultObj.getField("plans");
- ASSERT_EQUALS(plansElt.type(), mongo::Array);
- vector<BSONElement> planEltArray = plansElt.Array();
- ASSERT_FALSE(planEltArray.empty());
- vector<BSONObj> plans(planEltArray.size());
- std::transform(planEltArray.begin(), planEltArray.end(), plans.begin(), getPlan);
- return plans;
-}
-
-TEST(PlanCacheCommandsTest, planCacheListPlansInvalidParameter) {
- PlanCache planCache;
- BSONObjBuilder ignored;
- OperationContextNoop opCtx;
-
- // Missing query field is not ok.
- ASSERT_NOT_OK(
- PlanCacheListPlansDeprecated::list(&opCtx, planCache, nss.ns(), BSONObj(), &ignored));
- // Query field type must be BSON object.
- ASSERT_NOT_OK(PlanCacheListPlansDeprecated::list(
- &opCtx, planCache, nss.ns(), fromjson("{query: 12345}"), &ignored));
- ASSERT_NOT_OK(PlanCacheListPlansDeprecated::list(
- &opCtx, planCache, nss.ns(), fromjson("{query: /keyisnotregex/}"), &ignored));
+ ASSERT_NOT_OK(plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}"))
+ .getStatus());
}
-TEST(PlanCacheCommandsTest, planCacheListPlansUnknownKey) {
- // Leave the plan cache empty.
+TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) {
PlanCache planCache;
- OperationContextNoop opCtx;
- BSONObjBuilder ignored;
- ASSERT_OK(PlanCacheListPlansDeprecated::list(
- &opCtx, planCache, nss.ns(), fromjson("{query: {a: 1}}"), &ignored));
-}
-
-TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionTrue) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
+ auto statusWithCQ =
+ plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
+ ASSERT_OK(statusWithCQ.getStatus());
+ std::unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
- // Create a canonical query
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 1}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
+    // An equivalent query (same shape, different constants) should generate the same key.
+ statusWithCQ =
+ plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 4}}"));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
+ std::unique_ptr<CanonicalQuery> equivQuery = std::move(statusWithCQ.getValue());
+ ASSERT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*equivQuery));
+}
- // Plan cache with one entry
+TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted) {
PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cq,
- solns,
- createDecision(1U, 123),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
-
- BSONObj resultObj = getCmdResult(planCache,
- cq->getQueryObj(),
- cq->getQueryRequest().getSort(),
- cq->getQueryRequest().getProj(),
- cq->getQueryRequest().getCollation());
- ASSERT_EQ(resultObj["plans"].Array().size(), 1u);
- ASSERT_EQ(resultObj.getBoolField("isActive"), false);
- ASSERT_EQ(resultObj.getIntField("works"), 123L);
-}
-
-TEST(PlanCacheCommandsTest, planCacheListPlansOnlyOneSolutionFalse) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Create a canonical query
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 1}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
+ auto statusWithCQ =
+ plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
-
- // Plan cache with one entry
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- // Add cache entry with 2 solutions.
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cq,
- solns,
- createDecision(2U, 333),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
+ std::unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
- BSONObj resultObj = getCmdResult(planCache,
- cq->getQueryObj(),
- cq->getQueryRequest().getSort(),
- cq->getQueryRequest().getProj(),
- cq->getQueryRequest().getCollation());
-
- ASSERT_EQ(resultObj["plans"].Array().size(), 2u);
- ASSERT_EQ(resultObj.getBoolField("isActive"), false);
- ASSERT_EQ(resultObj.getIntField("works"), 333);
+    // A sorted query should generate a different key than the unsorted query.
+ statusWithCQ = plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
+ ASSERT_OK(statusWithCQ.getStatus());
+ std::unique_ptr<CanonicalQuery> sortQuery = std::move(statusWithCQ.getValue());
+ ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*sortQuery));
}
+// Regression test for SERVER-17158.
+TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) {
+ PlanCache planCache;
-TEST(PlanCacheCommandsTest, planCacheListPlansCollation) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
+ auto statusWithCQ = plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}"));
+ ASSERT_OK(statusWithCQ.getStatus());
+ std::unique_ptr<CanonicalQuery> sortQuery1 = std::move(statusWithCQ.getValue());
- // Create 2 canonical queries, one with collation.
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 'foo'}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
+    // Confirm that sort arguments are properly delimited (SERVER-17158).
+ statusWithCQ = plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
- unique_ptr<CanonicalQuery> cq = std::move(statusWithCQ.getValue());
- auto qrCollation = std::make_unique<QueryRequest>(nss);
- qrCollation->setFilter(fromjson("{a: 'foo'}"));
- qrCollation->setCollation(fromjson("{locale: 'mock_reverse_string'}"));
- auto statusWithCQCollation = CanonicalQuery::canonicalize(opCtx.get(), std::move(qrCollation));
- ASSERT_OK(statusWithCQCollation.getStatus());
- unique_ptr<CanonicalQuery> cqCollation = std::move(statusWithCQCollation.getValue());
+ std::unique_ptr<CanonicalQuery> sortQuery2 = std::move(statusWithCQ.getValue());
+ ASSERT_NOT_EQUALS(planCache.computeKey(*sortQuery1), planCache.computeKey(*sortQuery2));
+}
- // Create plan cache with 2 entries. Add an index so that indexability is included in the plan
- // cache keys. Give query with collation two solutions.
+TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnprojected) {
PlanCache planCache;
- const auto keyPattern = fromjson("{a: 1}");
- planCache.notifyOfIndexUpdates(
- {CoreIndexInfo(keyPattern,
- IndexNames::nameToType(IndexNames::findPluginName(keyPattern)),
- false, // sparse
- IndexEntry::Identifier{"indexName"})}); // name
-
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- ASSERT_OK(planCache.set(*cq,
- solns,
- createDecision(1U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
- std::vector<QuerySolution*> twoSolns;
- twoSolns.push_back(&qs);
- twoSolns.push_back(&qs);
- ASSERT_OK(planCache.set(*cqCollation,
- twoSolns,
- createDecision(2U),
- opCtx->getServiceContext()->getPreciseClockSource()->now()));
-
- // Normal query should have one solution.
- vector<BSONObj> plans = getPlans(planCache,
- cq->getQueryObj(),
- cq->getQueryRequest().getSort(),
- cq->getQueryRequest().getProj(),
- cq->getQueryRequest().getCollation());
- ASSERT_EQUALS(plans.size(), 1U);
- // Query with collation should have two solutions.
- vector<BSONObj> plansCollation = getPlans(planCache,
- cqCollation->getQueryObj(),
- cqCollation->getQueryRequest().getSort(),
- cqCollation->getQueryRequest().getProj(),
- cqCollation->getQueryRequest().getCollation());
- ASSERT_EQUALS(plansCollation.size(), 2U);
-}
-
-TEST(PlanCacheCommandsTest, planCacheListPlansTimeOfCreationIsCorrect) {
QueryTestServiceContext serviceContext;
auto opCtx = serviceContext.makeOperationContext();
-
- // Create a canonical query.
- auto qr = std::make_unique<QueryRequest>(nss);
- qr->setFilter(fromjson("{a: 1}"));
- auto statusWithCQ = CanonicalQuery::canonicalize(opCtx.get(), std::move(qr));
+ auto statusWithCQ =
+ plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}"));
ASSERT_OK(statusWithCQ.getStatus());
- auto cq = std::move(statusWithCQ.getValue());
-
- // Plan cache with one entry.
- PlanCache planCache;
- QuerySolution qs;
- qs.cacheData.reset(createSolutionCacheData());
- std::vector<QuerySolution*> solns;
- solns.push_back(&qs);
- auto now = opCtx->getServiceContext()->getPreciseClockSource()->now();
- ASSERT_OK(planCache.set(*cq, solns, createDecision(1U), now));
-
- auto entry = unittest::assertGet(planCache.getEntry(*cq));
+ std::unique_ptr<CanonicalQuery> query = std::move(statusWithCQ.getValue());
- ASSERT_EQ(entry->timeOfCreation, now);
+ statusWithCQ = plan_cache_commands::canonicalize(
+ opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}"));
+ ASSERT_OK(statusWithCQ.getStatus());
+ std::unique_ptr<CanonicalQuery> projectionQuery = std::move(statusWithCQ.getValue());
+ ASSERT_NOT_EQUALS(planCache.computeKey(*query), planCache.computeKey(*projectionQuery));
}
} // namespace
+} // namespace mongo
diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h
index 06e648be653..4d448650808 100644
--- a/src/mongo/db/query/plan_cache.h
+++ b/src/mongo/db/query/plan_cache.h
@@ -554,8 +554,7 @@ public:
PlanCacheKey computeKey(const CanonicalQuery&) const;
/**
- * Returns a copy of a cache entry.
- * Used by planCacheListPlans to display plan details.
+ * Returns a copy of a cache entry, looked up by CanonicalQuery.
*
* If there is no entry in the cache for the 'query', returns an error Status.
*/
diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl
index 62a47e4767c..0ce3780596e 100644
--- a/src/mongo/db/query/query_knobs.idl
+++ b/src/mongo/db/query/query_knobs.idl
@@ -43,7 +43,7 @@ server_parameters:
cpp_varname: "internalQueryPlanEvaluationWorks"
cpp_vartype: AtomicWord<int>
default: 10000
- validator:
+ validator:
gt: 0
internalQueryPlanEvaluationCollFraction:
@@ -52,7 +52,7 @@ server_parameters:
cpp_varname: "internalQueryPlanEvaluationCollFraction"
cpp_vartype: AtomicDouble
default: 0.3
- validator:
+ validator:
gte: 0.0
lte: 1.0
@@ -62,16 +62,16 @@ server_parameters:
cpp_varname: "internalQueryPlanEvaluationMaxResults"
cpp_vartype: AtomicWord<int>
default: 101
- validator:
+ validator:
gte: 0
-
+
internalQueryForceIntersectionPlans:
description: "Do we give a big ranking bonus to intersection plans?"
set_at: [ startup, runtime ]
cpp_varname: "internalQueryForceIntersectionPlans"
cpp_vartype: AtomicWord<bool>
default: false
-
+
internalQueryPlannerEnableIndexIntersection:
description: "Do we have ixisect on at all?"
set_at: [ startup, runtime ]
@@ -85,7 +85,7 @@ server_parameters:
cpp_varname: "internalQueryPlannerEnableHashIntersection"
cpp_vartype: AtomicWord<bool>
default: false
-
+
#
# Plan cache
#
@@ -95,7 +95,7 @@ server_parameters:
cpp_varname: "internalQueryCacheSize"
cpp_vartype: AtomicWord<int>
default: 5000
- validator:
+ validator:
gte: 0
internalQueryCacheFeedbacksStored:
@@ -104,7 +104,7 @@ server_parameters:
cpp_varname: "internalQueryCacheFeedbacksStored"
cpp_vartype: AtomicWord<int>
default: 20
- validator:
+ validator:
gte: 0
internalQueryCacheEvictionRatio:
@@ -113,7 +113,7 @@ server_parameters:
cpp_varname: "internalQueryCacheEvictionRatio"
cpp_vartype: AtomicDouble
default: 10.0
- validator:
+ validator:
gte: 0.0
internalQueryCacheWorksGrowthCoefficient:
@@ -122,9 +122,9 @@ server_parameters:
cpp_varname: "internalQueryCacheWorksGrowthCoefficient"
cpp_vartype: AtomicDouble
default: 2.0
- validator:
+ validator:
gt: 1.0
-
+
internalQueryCacheDisableInactiveEntries:
description: "Whether or not cache entries can be marked as 'inactive'."
set_at: [ startup, runtime ]
@@ -132,13 +132,6 @@ server_parameters:
cpp_vartype: AtomicWord<bool>
default: false
- internalQueryCacheListPlansNewOutput:
- description: "Whether or not planCacheListPlans uses the new output format."
- set_at: [ startup, runtime ]
- cpp_varname: "internalQueryCacheListPlansNewOutput"
- cpp_vartype: AtomicWord<bool>
- default: false
-
#
# Planning and enumeration
#
@@ -148,7 +141,7 @@ server_parameters:
cpp_varname: "internalQueryPlannerMaxIndexedSolutions"
cpp_vartype: AtomicWord<int>
default: 64
- validator:
+ validator:
gte: 0
internalQueryEnumerationMaxOrSolutions:
@@ -157,7 +150,7 @@ server_parameters:
cpp_varname: "internalQueryEnumerationMaxOrSolutions"
cpp_vartype: AtomicWord<int>
default: 10
- validator:
+ validator:
gte: 0
internalQueryEnumerationMaxIntersectPerAnd:
@@ -166,7 +159,7 @@ server_parameters:
cpp_varname: "internalQueryEnumerationMaxIntersectPerAnd"
cpp_vartype: AtomicWord<int>
default: 3
- validator:
+ validator:
gte: 0
internalQueryPlanOrChildrenIndependently:
@@ -182,7 +175,7 @@ server_parameters:
cpp_varname: "internalQueryMaxScansToExplode"
cpp_vartype: AtomicWord<int>
default: 200
- validator:
+ validator:
gte: 0
internalQueryPlannerGenerateCoveredWholeIndexScans:
@@ -209,9 +202,9 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalQueryMaxBlockingSortMemoryUsageBytes"
cpp_vartype: AtomicWord<int>
- default:
+ default:
expr: 100 * 1024 * 1024
- validator:
+ validator:
gte: 0
internalQueryExecYieldIterations:
@@ -227,7 +220,7 @@ server_parameters:
cpp_varname: "internalQueryExecYieldPeriodMS"
cpp_vartype: AtomicWord<int>
default: 10
- validator:
+ validator:
gte: 0
internalQueryFacetBufferSizeBytes:
@@ -235,9 +228,9 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalQueryFacetBufferSizeBytes"
cpp_vartype: AtomicWord<int>
- default:
+ default:
expr: 100 * 1024 * 1024
- validator:
+ validator:
gt: 0
internalLookupStageIntermediateDocumentMaxSizeBytes:
@@ -245,9 +238,9 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalLookupStageIntermediateDocumentMaxSizeBytes"
cpp_vartype: AtomicWord<long long>
- default:
+ default:
expr: 100 * 1024 * 1024
- validator:
+ validator:
gte: { expr: BSONObjMaxInternalSize}
internalDocumentSourceGroupMaxMemoryBytes:
@@ -255,9 +248,9 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalDocumentSourceGroupMaxMemoryBytes"
cpp_vartype: AtomicWord<long long>
- default:
+ default:
expr: 100 * 1024 * 1024
- validator:
+ validator:
gt: 0
internalInsertMaxBatchSize:
@@ -265,10 +258,10 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalInsertMaxBatchSize"
cpp_vartype: AtomicWord<int>
- default:
+ default:
expr: internalQueryExecYieldIterations.load() / 2
is_constexpr: false
- validator:
+ validator:
gt: 0
internalDocumentSourceCursorBatchSizeBytes:
@@ -276,9 +269,9 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalDocumentSourceCursorBatchSizeBytes"
cpp_vartype: AtomicWord<int>
- default:
+ default:
expr: 4 * 1024 * 1024
- validator:
+ validator:
gte: 0
internalDocumentSourceLookupCacheSizeBytes:
@@ -286,9 +279,9 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalDocumentSourceLookupCacheSizeBytes"
cpp_vartype: AtomicWord<int>
- default:
+ default:
expr: 100 * 1024 * 1024
- validator:
+ validator:
gte: 0
internalQueryProhibitBlockingMergeOnMongoS:
diff --git a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp
index 96821e99e27..a3fc78e6e8f 100644
--- a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp
+++ b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp
@@ -604,8 +604,6 @@ TEST_F(MongodbCAPITest, RunListCommands) {
"planCacheClear",
"planCacheClearFilters",
"planCacheListFilters",
- "planCacheListPlans",
- "planCacheListQueryShapes",
"planCacheSetFilter",
"reIndex",
"refreshLogicalSessionCacheNow",
diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript
index 2fbcc76d81b..d9c0f9b6f12 100644
--- a/src/mongo/s/commands/SConscript
+++ b/src/mongo/s/commands/SConscript
@@ -71,7 +71,7 @@ env.Library(
'cluster_multicast.cpp',
'cluster_netstat_cmd.cpp',
'cluster_pipeline_cmd.cpp',
- 'cluster_plan_cache_cmd.cpp',
+ 'cluster_plan_cache_clear_cmd.cpp',
'cluster_profile_cmd.cpp',
'cluster_refine_collection_shard_key_cmd.cpp',
'cluster_remove_shard_cmd.cpp',
diff --git a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp
index 192c5f9300a..96499720ea6 100644
--- a/src/mongo/s/commands/cluster_plan_cache_cmd.cpp
+++ b/src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp
@@ -46,16 +46,16 @@ using std::stringstream;
using std::vector;
/**
- * Base class for mongos plan cache commands.
- * Cluster plan cache commands don't do much more than
- * forwarding the commands to all shards and combining the results.
+ * Mongos implementation of the 'planCacheClear' command. Forwards the command to one node in each
+ * targeted shard. For example, with the default read preference ("primary"), clears plan cache
+ * entries on the primary node of each shard.
*/
-class ClusterPlanCacheCmd : public BasicCommand {
- ClusterPlanCacheCmd(const ClusterPlanCacheCmd&) = delete;
- ClusterPlanCacheCmd& operator=(const ClusterPlanCacheCmd&) = delete;
+class ClusterPlanCacheClearCmd final : public BasicCommand {
+ ClusterPlanCacheClearCmd(const ClusterPlanCacheClearCmd&) = delete;
+ ClusterPlanCacheClearCmd& operator=(const ClusterPlanCacheClearCmd&) = delete;
public:
- virtual ~ClusterPlanCacheCmd() {}
+ ClusterPlanCacheClearCmd() : BasicCommand("planCacheClear") {}
AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
return AllowedOnSecondary::kOptIn;
@@ -66,7 +66,7 @@ public:
}
std::string help() const override {
- return _helpText;
+ return "Drops one or all plan cache entries for a collection.";
}
std::string parseNs(const std::string& dbname, const BSONObj& cmdObj) const override {
@@ -79,40 +79,23 @@ public:
AuthorizationSession* authzSession = AuthorizationSession::get(client);
ResourcePattern pattern = parseResourcePattern(dbname, cmdObj);
- if (authzSession->isAuthorizedForActionsOnResource(pattern, _actionType)) {
+ if (authzSession->isAuthorizedForActionsOnResource(pattern, ActionType::planCacheWrite)) {
return Status::OK();
}
return Status(ErrorCodes::Unauthorized, "unauthorized");
}
- // Cluster plan cache command entry point.
bool run(OperationContext* opCtx,
const std::string& dbname,
const BSONObj& cmdObj,
BSONObjBuilder& result);
+} clusterPlanCacheClearCmd;
-public:
- /**
- * Instantiates a command that can be invoked by "name", which will be described by
- * "helpText", and will require privilege "actionType" to run.
- */
- ClusterPlanCacheCmd(const std::string& name, const std::string& helpText, ActionType actionType)
- : BasicCommand(name), _helpText(helpText), _actionType(actionType) {}
-
-private:
- std::string _helpText;
- ActionType _actionType;
-};
-
-//
-// Cluster plan cache command implementation(s) below
-//
-
-bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
- const std::string& dbName,
- const BSONObj& cmdObj,
- BSONObjBuilder& result) {
+bool ClusterPlanCacheClearCmd::run(OperationContext* opCtx,
+ const std::string& dbName,
+ const BSONObj& cmdObj,
+ BSONObjBuilder& result) {
const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj));
const BSONObj query;
const auto routingInfo =
@@ -146,43 +129,19 @@ bool ClusterPlanCacheCmd::run(OperationContext* opCtx,
uassertStatusOK(status.withContext(str::stream() << "failed on: " << response.shardId));
const auto& cmdResult = response.swResponse.getValue().data;
- // XXX: In absence of sensible aggregation strategy,
- // promote first shard's result to top level.
+ // In absence of sensible aggregation strategy, promote first shard's result to top level.
if (i == shardResponses.begin()) {
CommandHelpers::filterCommandReplyForPassthrough(cmdResult, &result);
status = getStatusFromCommandResult(cmdResult);
clusterCmdResult = status.isOK();
}
- // Append shard result as a sub object.
- // Name the field after the shard.
+ // Append shard result as a sub object. Name the field after the shard.
result.append(response.shardId, cmdResult);
}
return clusterCmdResult;
}
-//
-// Register plan cache commands at startup
-//
-
-MONGO_INITIALIZER(RegisterPlanCacheCommands)(InitializerContext* context) {
- // Leaked intentionally: a Command registers itself when constructed.
-
- new ClusterPlanCacheCmd("planCacheListQueryShapes",
- "Displays all query shapes in a collection.",
- ActionType::planCacheRead);
-
- new ClusterPlanCacheCmd("planCacheClear",
- "Drops one or all cached queries in a collection.",
- ActionType::planCacheWrite);
-
- new ClusterPlanCacheCmd("planCacheListPlans",
- "Displays the cached plans for a query shape.",
- ActionType::planCacheRead);
-
- return Status::OK();
-}
-
} // namespace
} // namespace mongo
diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js
index 50bf9ac9008..09ab5ede0d1 100644
--- a/src/mongo/shell/collection.js
+++ b/src/mongo/shell/collection.js
@@ -1511,16 +1511,14 @@ PlanCache.prototype.help = function() {
var shortName = this.getName();
print("PlanCache help");
print("\tdb." + shortName + ".getPlanCache().help() - show PlanCache help");
- print("\tdb." + shortName + ".getPlanCache().listQueryShapes() - " +
- "displays all query shapes in a collection");
print("\tdb." + shortName + ".getPlanCache().clear() - " +
"drops all cached queries in a collection");
print("\tdb." + shortName +
".getPlanCache().clearPlansByQuery(query[, projection, sort, collation]) - " +
"drops query shape from plan cache");
- print("\tdb." + shortName +
- ".getPlanCache().getPlansByQuery(query[, projection, sort, collation]) - " +
- "displays the cached plans for a query shape");
+ print("\tdb." + shortName + ".getPlanCache().list([pipeline]) - " +
+ "displays a serialization of the plan cache for this collection, " +
+ "after applying an optional aggregation pipeline");
return __magicNoPrint;
};
@@ -1597,13 +1595,6 @@ PlanCache.prototype._runCommandThrowOnError = function(cmd, params) {
};
/**
- * Lists query shapes in a collection.
- */
-PlanCache.prototype.listQueryShapes = function() {
- return this._runCommandThrowOnError("planCacheListQueryShapes", {}).shapes;
-};
-
-/**
* Clears plan cache in a collection.
*/
PlanCache.prototype.clear = function() {
@@ -1612,14 +1603,6 @@ PlanCache.prototype.clear = function() {
};
/**
- * List plans for a query shape.
- */
-PlanCache.prototype.getPlansByQuery = function(query, projection, sort, collation) {
- return this._runCommandThrowOnError("planCacheListPlans",
- this._parseQueryShape(query, projection, sort, collation));
-};
-
-/**
* Drop query shape from the plan cache.
*/
PlanCache.prototype.clearPlansByQuery = function(query, projection, sort, collation) {
@@ -1627,3 +1610,13 @@ PlanCache.prototype.clearPlansByQuery = function(query, projection, sort, collat
this._parseQueryShape(query, projection, sort, collation));
return;
};
+
+/**
+ * Returns an array of plan cache data for the collection, after applying the given optional
+ * aggregation pipeline.
+ */
+PlanCache.prototype.list = function(pipeline) {
+ const additionalPipeline = pipeline || [];
+ const completePipeline = [{$planCacheStats: {}}].concat(additionalPipeline);
+ return this._collection.aggregate(completePipeline).toArray();
+};