path: root/jstests/core/cached_plan_trial_does_not_discard_work.js
// Test that, when running a trial of a cached plan that has blocking stages, the planner does not
// invalidate the plan (and discard its results) at the end of the trial unless replanning is
// needed.
//
// @tags: [
//   # This test attempts to perform queries and introspect the server's plan cache entries. The
//   # former operation may be routed to a secondary in the replica set, whereas the latter must be
//   # routed to the primary.
//   assumes_read_concern_unchanged,
//   assumes_read_preference_unchanged,
//   assumes_unsharded_collection,
//   does_not_support_stepdowns,
//   requires_fcv_52,
//   requires_profiling,
// ]
(function() {
'use strict';

load("jstests/libs/profiler.js");  // getLatestProfileEntry.
load("jstests/libs/sbe_util.js");  // For checkSBEEnabled.

if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
    jsTestLog("Skipping test because SBE is disabled");
    return;
}

const testDb = db.getSiblingDB('cached_plan_trial_does_not_discard_work');
assert.commandWorked(testDb.dropDatabase());
const coll = testDb.getCollection('test');

const queryPlanEvaluationMaxResults = (() => {
    const getParamRes = assert.commandWorked(
        testDb.adminCommand({getParameter: 1, internalQueryPlanEvaluationMaxResults: 1}));
    return getParamRes["internalQueryPlanEvaluationMaxResults"];
})();

const queryCacheEvictionRatio = (() => {
    const getParamRes = assert.commandWorked(
        testDb.adminCommand({getParameter: 1, internalQueryCacheEvictionRatio: 1}));
    return getParamRes["internalQueryCacheEvictionRatio"];
})();
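
// Note (assumption, not asserted by this test): internalQueryPlanEvaluationMaxResults caps how
// many results a candidate plan may produce during a planning trial, and
// internalQueryCacheEvictionRatio scales a cache entry's "works" value to decide when a cached
// plan has done too much work and must be replanned. The data set below is sized around these
// two knobs.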

assert.commandWorked(coll.createIndex({a: 1}));
assert.commandWorked(coll.createIndex({b: 1, d: 1}));

// Add enough documents to the collection to ensure that the test query will always run through its
// "trial period" when using the cached plan.
const numMatchingDocs = 2 * queryPlanEvaluationMaxResults;
let bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < 100; i++) {
    // Add documents that will not match the test query but will favor the {a: 1} index.
    bulk.insert({a: 0, b: 1, c: i, d: i % 2});
}
for (let i = 100; i < 100 + numMatchingDocs; i++) {
    // Add documents that will match the test query.
    bulk.insert({a: 1, b: 1, c: i, d: i % 2});
}
assert.commandWorked(bulk.execute());

// We enable profiling and run the test query three times. The first two times, it will go through
// multiplanning.
function runTestQuery(comment) {
    return coll.find({a: 1, b: 1})
        .sort({c: 1})
        .batchSize(numMatchingDocs + 1)
        .comment(comment)
        .itcount();
}

testDb.setProfilingLevel(2);
let lastComment;
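// Note (assumption about plan-cache internals): the cache entry created by the first
// multi-planned execution typically starts out inactive and is activated by the second, so the
// third execution is the first one expected to use the cached plan.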
for (let i = 0; i < 3; i++) {
    lastComment = `test query: ${i}`;
    const numResults = runTestQuery(lastComment);
    assert.eq(numResults, numMatchingDocs);
}

// Get the profile entry for the third execution, which should have bypassed the multiplanner and
// used a cached plan.
const profileEntry = getLatestProfilerEntry(
    testDb, {'command.find': coll.getName(), 'command.comment': lastComment});
assert(!profileEntry.fromMultiPlanner, profileEntry);
assert('planCacheKey' in profileEntry, profileEntry);

// We expect the cached plan to run through its "trial period," but the planner should determine
// that the cached plan is still good and does _not_ need replanning. Previously, the planner would
// still need to close the execution tree in this scenario, discarding all the work it had already
// done. This test ensures that behavior is corrected: the execution tree should only need to be
// opened 1 time.
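// (The execStats "opens" counter is assumed here to reflect how many times the root of the SBE
// execution tree was opened; a value greater than 1 would indicate that the tree was closed and
// re-opened, discarding the results buffered during the trial.)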
assert.eq(profileEntry.execStats.opens, 1, profileEntry);

const planCacheEntry = (() => {
    const planCache =
        coll.getPlanCache().list([{$match: {planCacheKey: profileEntry.planCacheKey}}]);
    assert.eq(planCache.length, 1, planCache);
    return planCache[0];
})();

// Modify the test data so that it will force a replan. We remove all the documents that will match
// the test query and add non-matching documents that will get examined by the index scan (for
// either index). The planner's criterion for when a cached index scan has done too much work (and
// should be replanned) is based on the "works" value in the plan cache entry and the
// "internalQueryCacheEvictionRatio" server parameter, so we use those values to determine how many
// documents to add.
//
// This portion of the test validates that replanning still works as desired even after the query
// planner changes to allow "trial periods" that do not discard results when replanning is not
// necessary.
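//
// For example (illustrative numbers only): if the cache entry recorded works = 300 and
// internalQueryCacheEvictionRatio is 10, the loop below inserts about 3001 documents on each
// non-matching "branch", so a cached index scan would have to do far more than
// evictionRatio * works units of work before finding a match, which should trigger replanning.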

assert.commandWorked(coll.remove({a: 1, b: 1}));
bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < queryCacheEvictionRatio * planCacheEntry.works + 1; i++) {
    bulk.insert({a: 1, b: 0, c: i});
    bulk.insert({a: 0, b: 1, c: i});
}
assert.commandWorked(bulk.execute());

// Run the query one last time, and get its profile entry to ensure it triggered replanning.
lastComment = "test query expected to trigger replanning";
const numResults = runTestQuery(lastComment);
assert.eq(numResults, 0);

const replanProfileEntry = getLatestProfilerEntry(
    testDb, {'command.find': coll.getName(), 'command.comment': lastComment});
assert(replanProfileEntry.replanned, replanProfileEntry);
}());