jstests/sharding/timeseries_drop.js
/**
 * Tests that dropping a sharded time-series collection removes both the time-series
 * view and the underlying buckets collection from all shards and from the sharding
 * catalog.
 *
 * @tags: [
 *   requires_fcv_51,
 * ]
 */

(function() {
"use strict";

load("jstests/core/timeseries/libs/timeseries.js");  // For 'TimeseriesTest' helpers.

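// Seed the global PRNG; the TimeseriesTest helpers used below (e.g. generateHosts)
// draw their random values from it.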
Random.setRandomSeed();

const dbName = 'testDB';
const collName = 'testColl';
const timeField = 'time';
const metaField = 'hostid';

// Connections.
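// Two shards, each backed by a two-node replica set; 's0' is the first mongos router.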
const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
const mongos = st.s0;

// Sanity checks.
if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) {
    jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled");
    st.stop();
    return;
}

// Databases.
const mainDB = mongos.getDB(dbName);
const configDB = mongos.getDB('config');

// Helpers.
let currentId = 0;
function generateId() {
    return currentId++;
}

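// Each generated host document gets a distinct meta value and a timestamp a decade
// apart (2000, 2010, ...), so both meta- and time-based shard keys yield
// well-separated split points.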
function generateBatch(size) {
    return TimeseriesTest.generateHosts(size).map((host, index) => Object.assign(host, {
        _id: generateId(),
        [metaField]: index,
        [timeField]: ISODate(`20${index}0-01-01`),
    }));
}

function ensureCollectionDoesNotExist(collName) {
    const databases = [mainDB, st.shard0.getDB(dbName), st.shard1.getDB(dbName)];
    for (const db of databases) {
        const collections = db.getCollectionNames();
        assert(!collections.includes(collName), collections);
    }
}

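// Creates a time-series collection, shards it with the key produced by 'getShardKey',
// optionally splits the buckets into two chunks spread across both shards, then drops
// the collection and verifies that no trace of it remains.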
function runTest(getShardKey, performChunkSplit) {
    mainDB.dropDatabase();

    assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));

    // Create the time-series collection.
    assert.commandWorked(mainDB.createCollection(
        collName, {timeseries: {timeField: timeField, metaField: metaField}}));
    const coll = mainDB.getCollection(collName);

    // Shard the time-series collection.
    const shardKey = getShardKey(1, 1);
    assert.commandWorked(coll.createIndex(shardKey));
    assert.commandWorked(mongos.adminCommand({
        shardCollection: `${dbName}.${collName}`,
        key: shardKey,
    }));

    // Insert initial set of documents.
    const numDocs = 8;
    const firstBatch = generateBatch(numDocs);
    assert.commandWorked(coll.insert(firstBatch));

    if (performChunkSplit) {
        // Manually split the data into two chunks.
        const splitIndex = numDocs / 2;
        const splitPoint = {};
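        // On the underlying buckets collection, the metaField is stored as 'meta' and
        // the time bounds under 'control.min.<timeField>' / 'control.max.<timeField>',
        // so the split point must be expressed in those translated field names.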
        if (shardKey.hasOwnProperty(metaField)) {
            splitPoint.meta = firstBatch[splitIndex][metaField];
        }
        if (shardKey.hasOwnProperty(timeField)) {
            splitPoint[`control.min.${timeField}`] = firstBatch[splitIndex][timeField];
        }

        assert.commandWorked(mongos.adminCommand(
            {split: `${dbName}.system.buckets.${collName}`, middle: splitPoint}));

        // Ensure that both chunks currently reside on the primary shard.
        let counts = st.chunkCounts(`system.buckets.${collName}`, dbName);
        const primaryShard = st.getPrimaryShard(dbName);
        assert.eq(2, counts[primaryShard.shardName], counts);

        // Move one of the chunks to the second shard.
        const otherShard = st.getOther(primaryShard);
        assert.commandWorked(mongos.adminCommand({
            movechunk: `${dbName}.system.buckets.${collName}`,
            find: splitPoint,
            to: otherShard.name,
            _waitForDelete: true
        }));

        // Ensure that each shard owns one chunk.
        counts = st.chunkCounts(`system.buckets.${collName}`, dbName);
        assert.eq(1, counts[primaryShard.shardName], counts);
        assert.eq(1, counts[otherShard.shardName], counts);
    }

    // Drop the time-series collection.
    assert(coll.drop());

    // Ensure that neither the time-series view nor the buckets collection exists
    // according to mongos or either shard.
    ensureCollectionDoesNotExist(collName);
    ensureCollectionDoesNotExist(`system.buckets.${collName}`);

    // Ensure that the time-series buckets collection gets deleted from the config database as well.
    assert.eq([],
              configDB.collections.find({_id: `${dbName}.system.buckets.${collName}`}).toArray());
}

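// Drive the test over all three supported shard key shapes (meta-only, time-only,
// meta+time), each with and without a manual chunk split.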
try {
    for (let performChunkSplit of [false, true]) {
        function metaShardKey(meta, _) {
            return {[metaField]: meta};
        }
        runTest(metaShardKey, performChunkSplit);

        function timeShardKey(_, time) {
            return {[timeField]: time};
        }
        runTest(timeShardKey, performChunkSplit);

        function timeAndMetaShardKey(meta, time) {
            return {[metaField]: meta, [timeField]: time};
        }
        runTest(timeAndMetaShardKey, performChunkSplit);
    }
} finally {
    st.stop();
}
})();