summaryrefslogtreecommitdiff
path: root/jstests/sharding/clustered_top_chunk_split.js
blob: 116cde729ec2a7293995bb17ae7efd08117ecf85 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
/**
 * This file tests the top-chunk optimization logic in splitChunk command. Whenever a chunk is
 * split, the shouldMigrate field should be set if the extreme chunk has only a single document,
 * where extreme chunk is defined as the chunk containing either the upper or lower bound of the
 * entire shard key space.
 *
 * This test mimics the existing top_chunk_split.js but on a clustered collection.
 */
(function() {
'use strict';

// A single-shard cluster suffices: splitChunk is issued directly against the
// shard primary, so no second shard (or balancer activity) is needed.
var st = new ShardingTest({shards: 1});
var testDB = st.s.getDB('test');
// Enable sharding on the 'test' database; each test case shards 'test.user'.
assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));

/**
 * Issues a splitChunk command for test.user directly against the given shard
 * database, splitting the chunk [minKey, maxKey) at the given splitPoints.
 * Returns the raw command response (not asserted on; callers inspect it).
 */
var callSplit = function(db, minKey, maxKey, splitPoints) {
    // Log the requested range and split points to ease test debugging.
    jsTestLog(`callSplit minKey ${tojson(minKey)}, ${tojson(maxKey)}, ${tojson(splitPoints)}`);

    // splitChunk requires the current collection epoch; fetch it via mongos.
    var versionRes = st.s.adminCommand({getShardVersion: "test.user"});
    assert.commandWorked(versionRes);

    var splitCmd = {
        splitChunk: 'test.user',
        from: st.shard0.shardName,
        min: minKey,
        max: maxKey,
        keyPattern: {_id: 1},
        splitKeys: splitPoints,
        epoch: versionRes.versionEpoch,
    };
    return db.runCommand(splitCmd);
};

// Each test case receives the shard primary's 'admin' DB and performs one
// splitChunk call, then checks whether the response carries a shouldMigrate
// hint (set only when the extreme chunk ends up with a single document).
//
// Improvement over the original: bsonWoCompare results are checked with
// assert.eq instead of a bare assert(... == 0), which avoids loose equality
// and reports actual-vs-expected values on failure.
var tests = [
    //
    // Lower extreme chunk tests.
    //

    // All chunks have 1 doc.
    //
    // Expected doc counts for new chunks:
    // [ MinKey, -2 ): 1
    // [ -2, -1 ): 1
    // [ -1, 0): 1
    //
    function(db) {
        var res = callSplit(db, {_id: MinKey}, {_id: 0}, [{_id: -2}, {_id: -1}]);
        assert.commandWorked(res);
        assert.neq(res.shouldMigrate, null, tojson(res));
        assert.eq(bsonWoCompare(res.shouldMigrate.min, {_id: MinKey}),
                  0,
                  tojson(res.shouldMigrate.min));
        assert.eq(
            bsonWoCompare(res.shouldMigrate.max, {_id: -2}), 0, tojson(res.shouldMigrate.max));
    },

    // One chunk has single doc, extreme doesn't.
    //
    // Expected doc counts for new chunks:
    // [ MinKey, -1 ): 2
    // [ -1, 0): 1
    //
    function(db) {
        var res = callSplit(db, {_id: MinKey}, {_id: 0}, [{_id: -1}]);
        assert.commandWorked(res);
        assert.eq(res.shouldMigrate, null, tojson(res));
    },

    // Only extreme has single doc.
    //
    // Expected doc counts for new chunks:
    // [ MinKey, -2 ): 1
    // [ -2, 0): 2
    //
    function(db) {
        var res = callSplit(db, {_id: MinKey}, {_id: 0}, [{_id: -2}]);
        assert.commandWorked(res);
        assert.neq(res.shouldMigrate, null, tojson(res));
        assert.eq(bsonWoCompare(res.shouldMigrate.min, {_id: MinKey}),
                  0,
                  tojson(res.shouldMigrate.min));
        assert.eq(
            bsonWoCompare(res.shouldMigrate.max, {_id: -2}), 0, tojson(res.shouldMigrate.max));
    },

    //
    // Upper extreme chunk tests.
    //

    // All chunks have 1 doc.
    //
    // Expected doc counts for new chunks:
    // [ 0, 1 ): 1
    // [ 1, 2 ): 1
    // [ 2, MaxKey): 1
    //
    function(db) {
        var res = callSplit(db, {_id: 0}, {_id: MaxKey}, [{_id: 1}, {_id: 2}]);
        assert.commandWorked(res);
        assert.neq(res.shouldMigrate, null, tojson(res));
        assert.eq(
            bsonWoCompare(res.shouldMigrate.min, {_id: 2}), 0, tojson(res.shouldMigrate.min));
        assert.eq(bsonWoCompare(res.shouldMigrate.max, {_id: MaxKey}),
                  0,
                  tojson(res.shouldMigrate.max));
    },

    // One chunk has single doc, extreme doesn't.
    //
    // Expected doc counts for new chunks:
    // [ 0, 1 ): 1
    // [ 1, MaxKey): 2
    //
    function(db) {
        var res = callSplit(db, {_id: 0}, {_id: MaxKey}, [{_id: 1}]);
        assert.commandWorked(res);
        assert.eq(res.shouldMigrate, null, tojson(res));
    },

    // Only extreme has single doc.
    //
    // Expected doc counts for new chunks:
    // [ 0, 2 ): 2
    // [ 2, MaxKey): 1
    //
    function(db) {
        var res = callSplit(db, {_id: 0}, {_id: MaxKey}, [{_id: 2}]);
        assert.commandWorked(res);
        assert.neq(res.shouldMigrate, null, tojson(res));
        assert.eq(
            bsonWoCompare(res.shouldMigrate.min, {_id: 2}), 0, tojson(res.shouldMigrate.min));
        assert.eq(bsonWoCompare(res.shouldMigrate.max, {_id: MaxKey}),
                  0,
                  tojson(res.shouldMigrate.max));
    },
];

tests.forEach(function(test) {
    // setup
    assert.commandWorked(
        testDB.createCollection("user", {clusteredIndex: {key: {_id: 1}, unique: true}}));

    assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
    assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {_id: 0}}));

    for (var _id = -3; _id < 3; _id++) {
        testDB.user.insert({_id: _id});
    }

    // run test
    test(st.rs0.getPrimary().getDB('admin'));

    // teardown
    testDB.user.drop();
});

st.stop();
})();