path: root/jstests/noPassthrough/timeseries_large_measurements_max_size.js
/**
 * Tests that buckets, which are normally kept open until the number of measurements reaches a
 * threshold (timeseriesBucketMinCount), are closed early once the bucket approaches the max
 * BSON size limit.
 *
 * @tags: [
 *   requires_collstats,
 *   requires_fcv_61,
 * ]
 */
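// Context, as a hedged aside rather than part of the test: on builds where the parameter is
// exposed, the measurement-count threshold mentioned above can be inspected with getParameter,
// e.g. db.adminCommand({getParameter: 1, timeseriesBucketMinCount: 1}).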
(function() {
"use strict";

load("jstests/core/timeseries/libs/timeseries.js");  // For 'TimeseriesTest'.

const conn = MongoRunner.runMongod();

const dbName = jsTestName();
const db = conn.getDB(dbName);
assert.commandWorked(db.dropDatabase());

const coll = db.getCollection(jsTestName());
const bucketColl = db.getCollection("system.buckets." + jsTestName());

const timeFieldName = "time";
const resetCollection = () => {
    coll.drop();
    assert.commandWorked(
        db.createCollection(jsTestName(), {timeseries: {timeField: timeFieldName}}));
};

const areTimeseriesScalabilityImprovementsEnabled =
    TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db);
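
// The assertions below branch on this flag: with the scalability improvements enabled, large
// measurements share a bucket until the size limit is hit; without them, each large measurement
// ends up in its own bucket.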

const numMeasurements = 4;
let expectedNumBucketsClosedDueToSize = 0;
let expectedNumBucketsKeptOpenDueToLargeMeasurements = 0;
const checkBucketSize = () => {
    const timeseriesStats = assert.commandWorked(coll.stats()).timeseries;

    if (areTimeseriesScalabilityImprovementsEnabled) {
        // Buckets with large measurements are kept open after exceeding timeseriesBucketMaxSize
        // until they have 10 measurements. However, if the bucket size were to exceed 12MB, it gets
        // closed regardless.
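        // Back-of-the-envelope sizing (an estimate, not exact server accounting): each ~2MB
        // value is stored in the bucket's data region and echoed in control.min/control.max,
        // so three measurements already put the bucket near 3*2MB + 2*2MB = 10MB; a fourth
        // would cross the ~12MB threshold, which is why a second bucket is expected below.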
        const bucketDocs = bucketColl.find().sort({'control.min._id': 1}).toArray();
        assert.eq(2, bucketDocs.length, bucketDocs);

        // The first bucket should be full, holding the first three documents (_id 0 through 2).
        assert.eq(0, bucketDocs[0].control.min._id);
        assert.eq(2, bucketDocs[0].control.max._id);

        // The second bucket should contain only the remaining document.
        assert.eq(numMeasurements - 1, bucketDocs[1].control.min._id);
        assert.eq(numMeasurements - 1, bucketDocs[1].control.max._id);

        assert.eq(++expectedNumBucketsClosedDueToSize, timeseriesStats.numBucketsClosedDueToSize);
        assert.eq(++expectedNumBucketsKeptOpenDueToLargeMeasurements,
                  timeseriesStats.numBucketsKeptOpenDueToLargeMeasurements);
    } else {
        // Without the time-series scalability improvements, each large measurement is placed
        // in its own bucket.
        const bucketDocs = bucketColl.find().sort({'control.min._id': 1}).toArray();
        assert.eq(numMeasurements, bucketDocs.length, bucketDocs);

        assert(!timeseriesStats.hasOwnProperty("numBucketsKeptOpenDueToLargeMeasurements"));
    }
};

const measurementValueLength = 2 * 1024 * 1024;
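// A ~2MB payload far exceeds the default timeseriesBucketMaxSize (roughly 125KB by default),
// so every insert takes the large-measurement path and the bucket stays open past the size
// limit, up to the measurement-count threshold or the ~12MB cap discussed above.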

jsTestLog("Testing single inserts");
resetCollection();

for (let i = 0; i < numMeasurements; i++) {
    const doc = {_id: i, [timeFieldName]: ISODate(), value: "a".repeat(measurementValueLength)};
    assert.commandWorked(coll.insert(doc));
}
checkBucketSize();

jsTestLog("Testing batched inserts");
resetCollection();

let batch = [];
for (let i = 0; i < numMeasurements; i++) {
    const doc = {_id: i, [timeFieldName]: ISODate(), value: "a".repeat(measurementValueLength)};
    batch.push(doc);
}
assert.commandWorked(coll.insertMany(batch));
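// insertMany submits all four documents in one ordered insert command, so the bucket catalog
// processes them as a single batch; the resulting bucket layout should match the single-insert
// case above, which the same checkBucketSize() assertions verify.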

checkBucketSize();
MongoRunner.stopMongod(conn);
}());