summaryrefslogtreecommitdiff
path: root/jstests/sharding/timeseries_orphan_buckets.js
blob: 62e04f95d71a3bd162672188ed7fc786205b04dd (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
/**
 * Tests to validate that an orphan bucket is not updatable after a chunk migration.
 *
 * @tags: [requires_fcv_51]
 */

(function() {

// Shell-test helpers: failpoint configuration and time-series test utilities.
load("jstests/libs/fail_point_util.js");
load("jstests/core/timeseries/libs/timeseries.js");
Random.setRandomSeed();

const dbName = "test";
const collName = "foo";
// Time-series collections store their data in a hidden "system.buckets.<coll>"
// collection; orphan cleanup below targets this namespace directly.
const bucketsNs = dbName + ".system.buckets." + collName;
const timeField = 'time';
const metaField = 'hostid';

// Two shards: shard0 becomes the database primary; the other shard receives a
// migrated chunk later in the test.
const st = new ShardingTest({shards: 2});
const sDB = st.s.getDB(dbName);

assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
const primaryShard = st.getPrimaryShard(dbName);
const otherShard = st.getOther(primaryShard);

// If this build does not enable sharded time-series collections, tear down the
// cluster and skip the test entirely.
if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(primaryShard)) {
    jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled");
    st.stop();
    return;
}

// Monotonically increasing counter used to give every inserted measurement a
// unique _id across the whole test run.
let currentId = 0;

// Returns the next unused id and advances the counter.
function generateId() {
    const id = currentId;
    currentId += 1;
    return id;
}

// Builds one measurement document for insertion: a base host document from the
// time-series test library, tagged with a unique _id, the meta value, and the
// given timestamp (parsed via ISODate).
function generateDoc(time, metaValue) {
    const [host] = TimeseriesTest.generateHosts(1);
    host._id = generateId();
    host[metaField] = metaValue;
    host[timeField] = ISODate(time);
    return host;
}

const shardKey = {
    [timeField]: 1
};
// Create the time-series collection and shard it on the time field. For a
// time-series collection the shard key is applied to the underlying buckets
// collection as control.min.<timeField> (see the split point below).
assert.commandWorked(sDB.createCollection(
    collName, {timeseries: {timeField: timeField, metaField: metaField, granularity: "hours"}}));
assert.commandWorked(sDB.adminCommand({
    shardCollection: `${dbName}.${collName}`,
    key: shardKey,
}));

// Split the chunks such that primary shard has chunk: [MinKey, 2020-01-01) and other shard has
// chunk [2020-01-01, MaxKey].
let splitPoint = {[`control.min.${timeField}`]: ISODate(`2020-01-01`)};
assert.commandWorked(
    sDB.adminCommand({split: `${dbName}.system.buckets.${collName}`, middle: splitPoint}));

// After the split, both chunks still live on the primary shard.
let counts = st.chunkCounts(`system.buckets.${collName}`, dbName);
assert.eq(2, counts[primaryShard.shardName], counts);
assert.eq(0, counts[otherShard.shardName], counts);

// Insert 8 measurements straddling the split point: 4 before 2020-01-01 (the
// chunk that will be migrated) and 4 after, across two meta values.
const coll = sDB.getCollection(collName);
for (let i = 0; i < 2; i++) {
    assert.commandWorked(coll.insert(generateDoc("2019-12-24", i)));
    assert.commandWorked(coll.insert(generateDoc("2019-12-29", i)));
    assert.commandWorked(coll.insert(generateDoc("2020-01-02", i)));
    assert.commandWorked(coll.insert(generateDoc("2020-01-04", i)));
}

jsTest.log("Assert that there are no range deletion tasks");
assert.eq(0, primaryShard.getDB("config").getCollection("rangeDeletions").count());
assert.eq(0, otherShard.getDB("config").getCollection("rangeDeletions").count());

// Block the donor's range deleter so the migrated buckets remain on the primary
// shard as orphans after the migration commits.
let suspendRangeDeletionShard0 = configureFailPoint(primaryShard, "suspendRangeDeletion");

// Migrate the [MinKey, 2020-01-01) chunk to the other shard without waiting for
// the (suspended) range deletion. NOTE(review): "movechunk" (lowercase) appears
// to be an accepted alias of moveChunk — confirm against the server's command
// registration.
assert.commandWorked(sDB.adminCommand({
    movechunk: `${dbName}.system.buckets.${collName}`,
    find: {[`control.min.${timeField}`]: MinKey},
    to: otherShard.name,
    _waitForDelete: false
}));

// Ensure that each shard owns one chunk.
counts = st.chunkCounts(`system.buckets.${collName}`, dbName);
assert.eq(1, counts[primaryShard.shardName], counts);
assert.eq(1, counts[otherShard.shardName], counts);

// Reads through mongos must not see the orphaned buckets twice.
assert.eq(8, coll.find().itcount());

// Insert the same pattern of documents again. The pre-2020 inserts now route to
// the recipient shard; they must not be absorbed into the orphaned buckets
// still present on the donor (the behavior under test).
for (let i = 0; i < 2; i++) {
    assert.commandWorked(coll.insert(generateDoc("2019-12-24", i)));
    assert.commandWorked(coll.insert(generateDoc("2019-12-29", i)));
    assert.commandWorked(coll.insert(generateDoc("2020-01-02", i)));
    assert.commandWorked(coll.insert(generateDoc("2020-01-04", i)));
}

assert.eq(16, coll.find().itcount(), coll.find().toArray());

// Exactly one pending range deletion task: on the donor, held by the failpoint.
assert.eq(0, otherShard.getDB("config").getCollection("rangeDeletions").count());
assert.eq(1, primaryShard.getDB("config").getCollection("rangeDeletions").count());

// Release the range deleter, then force orphan cleanup on the donor's buckets
// namespace and verify the pending task drains without losing any user data.
suspendRangeDeletionShard0.off();
const res = primaryShard.adminCommand({
    cleanupOrphaned: bucketsNs,
    startingFromKey: {
        [`control.min.${timeField}`]: MinKey
    } /* The startingFromKey parameter should be ignored
       */
});
assert.commandWorked(res);
assert.eq(0, primaryShard.getDB("config").getCollection("rangeDeletions").count());
assert.eq(16, coll.find().itcount(), coll.find().toArray());

st.stop();
})();