/**
* Tests maximum size of measurements held in each bucket in a time-series buckets collection.
* @tags: [
* requires_fcv_49,
* requires_find_command,
* requires_getmore,
* ]
*/
(function() {
"use strict";

load("jstests/core/time_series/libs/time_series.js");

if (!TimeseriesTest.timeseriesCollectionsEnabled(db.getMongo())) {
    jsTestLog("Skipping test because the time-series collection feature flag is disabled");
    return;
}

const testDB = db.getSiblingDB(jsTestName());
assert.commandWorked(testDB.dropDatabase());

const coll = testDB.getCollection('t');
const bucketsColl = testDB.getCollection('system.buckets.' + coll.getName());
coll.drop();

const timeFieldName = 'time';
assert.commandWorked(
    testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());

// Assumes each bucket has a limit of 125kB on the measurements stored in the 'data' field.
const bucketMaxSizeKB = 125;
const numDocs = 2;

// The measurement data should not take up all of the 'bucketMaxSizeKB' limit because we need
// to leave a little room for the _id and the time fields.
const largeValue = 'x'.repeat((bucketMaxSizeKB - 1) * 1024);

// Insert 'numDocs' measurements, each large enough that it cannot share a bucket with another.
for (let i = 0; i < numDocs; i++) {
    const t = ISODate();
    const doc = {_id: i, [timeFieldName]: t, x: largeValue};
    assert.commandWorked(coll.insert(doc), 'failed to insert doc: ' + i + ': ' + tojson(doc));
}

// Check view: every inserted measurement must be visible through the time-series view.
const viewDocs = coll.find({}, {x: 1}).sort({_id: 1}).toArray();
assert.eq(numDocs, viewDocs.length, viewDocs);
for (let i = 0; i < numDocs; i++) {
    const viewDoc = viewDocs[i];
    assert.eq(i, viewDoc._id, 'unexpected _id in doc: ' + i + ': ' + tojson(viewDoc));
    assert.eq(largeValue, viewDoc.x, 'unexpected field x in doc: ' + i + ': ' + tojson(viewDoc));
}

// Checks that 'bucketDoc' holds exactly one measurement whose _id is 'expectedId'.
// 'which' labels the bucket ('first', 'second', ...) in assertion messages.
function checkBucket(bucketDoc, expectedId, which) {
    assert.eq(1,
              bucketDoc.control.count,
              'invalid count in ' + which + ' bucket: ' + tojson(bucketDoc));
    assert.eq(expectedId,
              bucketDoc.control.min._id,
              'invalid control.min for _id in ' + which + ' bucket: ' + tojson(bucketDoc.control));
    assert.eq(largeValue,
              bucketDoc.control.min.x,
              'invalid control.min for x in ' + which + ' bucket: ' + tojson(bucketDoc.control));
    assert.eq(expectedId,
              bucketDoc.control.max._id,
              'invalid control.max for _id in ' + which + ' bucket: ' + tojson(bucketDoc.control));
    assert.eq(largeValue,
              bucketDoc.control.max.x,
              'invalid control.max for x in ' + which + ' bucket: ' + tojson(bucketDoc.control));
}

// Check bucket collection. Each oversized measurement must have landed in its own bucket,
// so we expect exactly 'numDocs' buckets (was previously hard-coded to 2).
const bucketDocs = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(numDocs, bucketDocs.length, bucketDocs);

// First bucket should be full with one document since we spill the second document over into the
// second bucket due to size constraints on 'data'.
checkBucket(bucketDocs[0], 0, 'first');

// Second bucket should contain the remaining document.
checkBucket(bucketDocs[1], numDocs - 1, 'second');
})();
|