path: root/jstests/noPassthrough/timeseries_direct_remove_conflict.js
/**
 * Tests that a direct removal from a time-series buckets collection closes the relevant bucket,
 * preventing further inserts from landing in that bucket, including the case where a concurrent
 * catalog write causes a write conflict.
 */
(function() {
'use strict';

load("jstests/libs/fail_point_util.js");
load("jstests/libs/parallel_shell_helpers.js");

const conn = MongoRunner.runMongod();

const dbName = jsTestName();
const testDB = conn.getDB(dbName);
assert.commandWorked(testDB.dropDatabase());

const collName = 'test';

const timeFieldName = 'time';
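// Three measurements with timestamps in the same hour, so that (under the default bucketing
// parameters) they would all be eligible for the same bucket unless it gets closed.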
const times = [
    ISODate('2021-01-01T01:00:00Z'),
    ISODate('2021-01-01T01:10:00Z'),
    ISODate('2021-01-01T01:20:00Z')
];
let docs = [
    {_id: 0, [timeFieldName]: times[0]},
    {_id: 1, [timeFieldName]: times[1]},
    {_id: 2, [timeFieldName]: times[2]}
];

const coll = testDB.getCollection(collName);
const bucketsColl = testDB.getCollection('system.buckets.' + coll.getName());
coll.drop();

assert.commandWorked(
    testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}}));
assert.contains(bucketsColl.getName(), testDB.getCollectionNames());

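// Insert the first measurement; this should open a single bucket containing it.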
assert.commandWorked(coll.insert(docs[0]));
assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(0, 1));

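// The buckets collection should contain exactly one bucket whose control.min/max both equal the
// first timestamp.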
let buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
assert.eq(buckets[0].control.min[timeFieldName], times[0]);
assert.eq(buckets[0].control.max[timeFieldName], times[0]);

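// Start an insert of the second measurement in a parallel shell, pausing it at the
// 'hangTimeseriesInsertBeforeWrite' failpoint so the write has not yet been applied when the
// direct remove below runs.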
const fpInsert = configureFailPoint(conn, "hangTimeseriesInsertBeforeWrite");
const awaitInsert = startParallelShell(
    funWithArgs(function(dbName, collName, doc) {
        assert.commandWorked(db.getSiblingDB(dbName).getCollection(collName).insert(doc));
    }, dbName, coll.getName(), docs[1]), conn.port);
fpInsert.wait();

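// Directly remove the bucket document in another parallel shell, pausing it at the
// 'hangTimeseriesDirectModificationBeforeWriteConflict' failpoint so that it conflicts with the
// still-pending insert.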
const fpRemove = configureFailPoint(conn, "hangTimeseriesDirectModificationBeforeWriteConflict");
const awaitRemove = startParallelShell(
    funWithArgs(function(dbName, collName, id) {
        const removeResult = assert.commandWorked(
            db.getSiblingDB(dbName).getCollection('system.buckets.' + collName).remove({_id: id}));
        assert.eq(removeResult.nRemoved, 1);
    }, dbName, coll.getName(), buckets[0]._id), conn.port);
fpRemove.wait();

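// Release both failpoints and wait for the parallel operations to finish.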
fpRemove.off();
fpInsert.off();
awaitRemove();
awaitInsert();

// The expected ordering is that the insert finishes first, and then the remove deletes the bucket
// document (taking the newly inserted measurement with it), so there should be no documents left.

assert.eq(coll.find().sort({_id: 1}).toArray().length, 0);

buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 0);

// Now another insert should generate a new bucket.

assert.commandWorked(coll.insert(docs[2]));
assert.docEq(coll.find().sort({_id: 1}).toArray(), docs.slice(2, 3));

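// The new measurement should land in a freshly created bucket whose control bounds match the
// third timestamp.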
buckets = bucketsColl.find().sort({_id: 1}).toArray();
assert.eq(buckets.length, 1);
assert.eq(buckets[0].control.min[timeFieldName], times[2]);
assert.eq(buckets[0].control.max[timeFieldName], times[2]);

MongoRunner.stopMongod(conn);
})();