/**
* Ensure that the query plan cache will not block the removal of orphaned documents.
*
* @tags: [
* # This test requires the fix from SERVER-73032.
* requires_fcv_63,
* # SBE is not yet used for clustered collections, and this test centers on the behavior of the
* # SBE plan cache.
* expects_explicit_underscore_id_index,
* ]
*/
(function() {
"use strict";
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
load("jstests/libs/sbe_util.js");
const dbName = "test";
const collName = "sbe_plan_cache_does_not_block_range_deletion";
const ns = dbName + "." + collName;
const st = new ShardingTest({mongos: 1, config: 1, shards: 2});
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
const isSbeFullyEnabled = checkSBEEnabled(st.s.getDB(dbName), ["featureFlagSbeFull"]);
const coll = st.s.getDB(dbName)[collName];
// Runs a test case against 'coll' after setting it up to have the given list of 'indexes' and a
// single 'document'. The test will execute a simple find command with the predicate 'filter'. Then
// it makes sure that the find command results in a cached plan and verifies that the existence of
// the cached plan does not interfere with range deletion.
function runTest({indexes, document, filter}) {
    coll.drop();
    assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}}));
    for (let index of indexes) {
        assert.commandWorked(coll.createIndex(index));
    }
    assert.commandWorked(coll.insert(document));

    // Run the same query twice to create an active plan cache entry.
    for (let i = 0; i < 2; ++i) {
        assert.eq(1, coll.find(filter).itcount());
    }

    // Ensure there is a cache entry we just created in the plan cache.
    const keyHash =
        getPlanCacheKeyFromShape({query: filter, collection: coll, db: st.s.getDB(dbName)});
    const res =
        coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: keyHash}}]).toArray();
    assert.eq(1, res.length);

    // Move the chunk to the second shard leaving orphaned documents on the first shard. Identify
    // the recipient by its registered shard name ('shardName'), not the connection's host string,
    // consistent with the 'ensurePrimaryShard' call above.
    assert.commandWorked(
        st.s.adminCommand({moveChunk: ns, find: {_id: 0}, to: st.shard1.shardName}));

    assert.soon(() => {
        // Ensure that the orphaned documents can be deleted.
        //
        // The "rangeDeletions" collection exists on each shard and stores a document for each chunk
        // range that contains orphaned documents. When the orphaned chunk range is cleaned up, the
        // document describing the range is deleted from the collection.
        return st.shard0.getDB('config')["rangeDeletions"].find().itcount() === 0;
    });
}
// Scenario with just one available indexed plan. If SBE is fully enabled, then the SBE plan cache
// is in use and we expect a pinned plan cache entry.
if (isSbeFullyEnabled) {
runTest({indexes: [{a: 1}], document: {_id: 0, a: "abc"}, filter: {a: "abc"}});
}
// Exercise the multi-planner using a case where there are multiple eligible indexes.
runTest({
indexes: [{a: 1}, {b: 1}],
document: {_id: 0, a: "abc", b: "123"},
filter: {a: "abc", b: "123"},
});
// Test a rooted $or query. This should use the subplanner. The way that the subplanner interacts
// with the plan cache differs between the classic engine and SBE. In the classic engine, the plan
// for each branch is cached independently, whereas in SBE we cache the entire "composite" plan.
// This test is written to expect the SBE behavior, so it only runs when SBE is fully enabled.
if (isSbeFullyEnabled) {
runTest({
indexes: [{a: 1}, {b: 1}, {c: 1}, {d: 1}],
document: {_id: 0, a: "abc", b: "123", c: 4, d: 5},
filter: {$or: [{a: "abc", b: "123"}, {c: 4, d: 5}]},
});
}
st.stop();
})();