summaryrefslogtreecommitdiff
path: root/jstests/sharding/write_cmd_auto_split.js
blob: 55796aa1c3034c2d7967cfd9997812b6cffcd06e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
/**
 * Tests the auto split will be triggered when using write commands.
 */
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');  // provides waitForOngoingChunkSplits()

// One shard with a 1MB max chunk size and autosplitting enabled, so that a
// few MB of writes is enough to force chunk splits.
var st = new ShardingTest({shards: 1, other: {chunkSize: 1, enableAutoSplit: true}});

var configDB = st.s.getDB('config');
assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));

// ~1KB filler string so each written document is roughly 1KB.
var doc1k = (new Array(1024)).join('x');
var testDB = st.s.getDB('test');

/**
 * Single-document insert commands (and then single-document upserts) should
 * each accumulate enough data in the initial chunk to trigger auto-splitting.
 */
function testSingleBatchInsertShouldAutoSplit() {
    jsTest.log('Test single batch insert should auto-split');

    assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
    assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));

    assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());

    // This should result in a little over 3MB inserted into the chunk, so with
    // a max chunk size of 1MB we'd expect the autosplitter to split this into
    // at least 3 chunks
    let i = 0;
    while (i < 3100) {
        const insertCmd = {
            insert: 'insert',
            documents: [{x: i, v: doc1k}],
            ordered: false,
            writeConcern: {w: 1}
        };
        assert.commandWorked(testDB.runCommand(insertCmd));
        i++;
    }

    waitForOngoingChunkSplits(st);

    // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
    // more than 2.
    assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 2);
    testDB.dropDatabase();

    jsTest.log('Test single batch update should auto-split');

    assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
    assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));

    assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());

    // Upsert ~2MB one document at a time; the chunk should split at least once.
    let j = 0;
    while (j < 2100) {
        const updateCmd = {
            update: 'update',
            updates: [{q: {x: j}, u: {x: j, v: doc1k}, upsert: true}],
            ordered: false,
            writeConcern: {w: 1}
        };
        assert.commandWorked(testDB.runCommand(updateCmd));
        j++;
    }

    waitForOngoingChunkSplits(st);

    assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
    testDB.dropDatabase();
}

/**
 * Single-document delete commands must never trigger a chunk auto-split:
 * the collection should still have exactly one chunk afterwards.
 */
function testSingleDeleteShouldNotAutoSplit() {
    jsTest.log('Test single delete should not auto-split');

    assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
    assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));

    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());

    let i = 0;
    while (i < 1100) {
        const deleteCmd = {
            delete: 'delete',
            deletes: [{q: {x: i, v: doc1k}, limit: NumberInt(0)}],
            ordered: false,
            writeConcern: {w: 1}
        };
        assert.commandWorked(testDB.runCommand(deleteCmd));
        i++;
    }

    // If we are autosplitting (which we shouldn't be), we want to wait until
    // it's finished, otherwise we could falsely think no autosplitting was
    // done when really it was just in progress.
    waitForOngoingChunkSplits(st);

    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
    testDB.dropDatabase();
}

/**
 * Inserts issued in 400-document batches should trigger auto-splitting of the
 * initial chunk.
 */
function testBatchedInsertShouldAutoSplit() {
    jsTest.log('Test batched insert should auto-split');

    assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
    assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));

    assert.eq(1, configDB.chunks.find({"ns": "test.insert"}).itcount());

    // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
    // we are going to be conservative.
    for (let base = 0; base < 2100; base += 400) {
        const batch = [];
        for (let offset = 0; offset < 400; offset++) {
            batch.push({x: base + offset, v: doc1k});
        }

        assert.commandWorked(testDB.runCommand(
            {insert: 'insert', documents: batch, ordered: false, writeConcern: {w: 1}}));
    }

    waitForOngoingChunkSplits(st);

    assert.gt(configDB.chunks.find({"ns": "test.insert"}).itcount(), 1);
    testDB.dropDatabase();
}

/**
 * Upserts issued in 400-entry batches should trigger auto-splitting of the
 * initial chunk.
 */
function testBatchedUpdateShouldAutoSplit() {
    jsTest.log('Test batched update should auto-split');

    assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
    assert.commandWorked(configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}}));

    assert.eq(1, configDB.chunks.find({"ns": "test.update"}).itcount());

    for (let base = 0; base < 2100; base += 400) {
        const batch = [];
        for (let offset = 0; offset < 400; offset++) {
            const key = base + offset;
            batch.push({q: {x: key}, u: {x: key, v: doc1k}, upsert: true});
        }

        assert.commandWorked(testDB.runCommand(
            {update: 'update', updates: batch, ordered: false, writeConcern: {w: 1}}));
    }

    waitForOngoingChunkSplits(st);

    assert.gt(configDB.chunks.find({"ns": "test.update"}).itcount(), 1);
    testDB.dropDatabase();
}

/**
 * Deletes issued in 400-entry batches must never trigger a chunk auto-split:
 * the collection should still have exactly one chunk afterwards.
 *
 * Fixes two defects in the original: the 400-entry 'docs' batch was built but
 * never sent (the command hard-coded a single-entry 'deletes' array instead),
 * and each batch entry used the invalid field 'top' rather than the required
 * 'limit' field of the delete command.
 */
function testBatchedDeleteShouldNotAutoSplit() {
    jsTest.log('Test batched delete should not auto-split');

    assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
    assert.commandWorked(configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}}));

    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());

    for (var x = 0; x < 2100; x += 400) {
        var docs = [];

        for (var y = 0; y < 400; y++) {
            var id = x + y;
            // limit: 0 deletes every document matching the query.
            docs.push({q: {x: id, v: doc1k}, limit: NumberInt(0)});
        }

        // Send the whole 400-entry batch in a single delete command.
        assert.commandWorked(testDB.runCommand({
            delete: 'delete',
            deletes: docs,
            ordered: false,
            writeConcern: {w: 1}
        }));
    }

    // If we are autosplitting (which we shouldn't be), we want to wait until
    // it's finished, otherwise we could falsely think no autosplitting was
    // done when really it was just in progress.
    waitForOngoingChunkSplits(st);

    assert.eq(1, configDB.chunks.find({"ns": "test.delete"}).itcount());
}

// Run every test case once, dropping the test database beforehand so each
// starts from a clean state; a failed case gets exactly one retry.
var testCases = [
    testSingleBatchInsertShouldAutoSplit,
    testSingleDeleteShouldNotAutoSplit,
    testBatchedInsertShouldAutoSplit,
    testBatchedUpdateShouldAutoSplit,
    testBatchedDeleteShouldNotAutoSplit
];

testCases.forEach(function(testCase) {
    try {
        testDB.dropDatabase();
        testCase();
    } catch (e) {
        print("Retrying test case failed due to " + e);
        // (SERVER-59882) The split may not have happened due to write-unit-of-work commit delay
        // Give it another best-effort try, given the low probability it would happen again
        testDB.dropDatabase();
        testCase();
    }
});

// Tear down the sharded cluster.
st.stop();
})();