/**
* Tests $indexStats on a time-series collection.
*
* @tags: [
* assumes_no_implicit_collection_creation_after_drop,
* # This test attempts to perform write operations and get index usage statistics using the
* # $indexStats stage. The former operation must be routed to the primary in a replica set,
* # whereas the latter may be routed to a secondary.
* assumes_read_preference_unchanged,
* # Cannot implicitly shard accessed collections because of following errmsg: A single
* # update/delete on a sharded collection must contain an exact match on _id or contain the shard
* # key.
* assumes_unsharded_collection,
* does_not_support_stepdowns,
* does_not_support_transactions,
* requires_fcv_49,
* requires_getmore,
* requires_non_retryable_writes,
* ]
*/
(function() {
"use strict";

load("jstests/core/timeseries/libs/timeseries.js");

TimeseriesTest.run((insert) => {
    const timeFieldName = 'tm';
    const metaFieldName = 'mm';

    // One measurement so the collection (and its buckets collection) is non-empty.
    const doc = {_id: 0, [timeFieldName]: ISODate(), [metaFieldName]: {tag1: 'a', tag2: 'b'}};

    const coll = db.timeseries_index_stats;
    const bucketsColl = db.getCollection('system.buckets.' + coll.getName());
    coll.drop();  // Dropping the view implicitly drops the underlying buckets collection.

    assert.commandWorked(db.createCollection(
        coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}));
    assert.contains(bucketsColl.getName(), db.getCollectionNames());
    assert.commandWorked(insert(coll, doc), 'failed to insert doc: ' + tojson(doc));

    // Indexes created through the time-series view; $indexStats is expected to report
    // every one of these with exactly these key patterns.
    const indexKeys = {
        index0: {[metaFieldName + '.tag1']: 1},
        index1: {[metaFieldName + '.tag2']: -1, [timeFieldName]: -1},
        index2: {[metaFieldName + '.tag3']: 1, [metaFieldName + '.tag4']: 1},
    };
    for (const [name, key] of Object.entries(indexKeys)) {
        assert.commandWorked(coll.createIndex(key, {name}),
                             'failed to create index: ' + name + ': ' + tojson(key));
    }

    // An index created directly on the buckets collection has no time-series key-pattern
    // translation, so the view's $indexStats output must omit it.
    assert.commandWorked(bucketsColl.createIndex({not_metadata: 1}, 'bucketindex'),
                         'failed to create index: ' + tojson({not_metadata: 1}));

    const expectedIndexCount = Object.keys(indexKeys).length;

    // $indexStats on the view: key patterns must match what createIndexes was given,
    // both at the top level ('key') and nested inside the spec ('spec.key').
    const statsDocs = coll.aggregate([{$indexStats: {}}]).toArray();
    assert.eq(expectedIndexCount, statsDocs.length, tojson(statsDocs));
    for (const stat of statsDocs) {
        assert(indexKeys.hasOwnProperty(stat.name),
               '$indexStats returned unknown index: ' + stat.name + ': ' + tojson(statsDocs));
        assert.docEq(indexKeys[stat.name],
                     stat.key,
                     '$indexStats returned unexpected top-level key for index: ' + stat.name +
                         ': ' + tojson(statsDocs));
        assert.docEq(indexKeys[stat.name],
                     stat.spec.key,
                     '$indexStats returned unexpected nested key in spec for index: ' + stat.name +
                         ': ' + tojson(statsDocs));
    }

    // On the buckets collection itself every index appears, including the one that was
    // skipped during schema translation — hence the +1.
    const bucketStatsDocs = bucketsColl.aggregate([{$indexStats: {}}]).toArray();
    assert.eq(expectedIndexCount + 1, bucketStatsDocs.length, tojson(bucketStatsDocs));

    // $indexStats may be followed by further stages in the same pipeline on a
    // time-series view (here: $group collecting the index names).
    const groupedDocs =
        coll.aggregate([{$indexStats: {}}, {$group: {_id: 0, index_names: {$addToSet: '$name'}}}])
            .toArray();
    assert.eq(1, groupedDocs.length, tojson(groupedDocs));
    assert.sameMembers(Object.keys(indexKeys), groupedDocs[0].index_names, tojson(groupedDocs));
});
})();