path: root/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
'use strict';

/**
 * Extends sharded_base_partitioned.js.
 *
 * Exercises concurrent splitChunk operations; each thread operates on its own
 * set of chunks.
 *
 * @tags: [requires_sharding, assumes_balancer_off, assumes_autosplit_off]
 */

load('jstests/concurrency/fsm_libs/extend_workload.js');                // for extendWorkload
load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js');  // for $config

var $config = extendWorkload($config, function($config, $super) {

    $config.iterations = 5;
    $config.threadCount = 5;

    $config.data.partitionSize = 100;  // number of shard key values
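
    // A hedged sketch (an assumption about sharded_base_partitioned.js, not code
    // in this workload): each thread is presumed to own a contiguous,
    // non-overlapping range of shard key values, roughly
    //
    //     var lower = this.tid * this.partitionSize;        // inclusive bound
    //     var upper = (this.tid + 1) * this.partitionSize;  // exclusive bound
    //
    // which is why threads can issue splitChunk concurrently without contending
    // on each other's chunks.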

    // Split a random chunk in this thread's partition, and verify that each node
    // in the cluster affected by the splitChunk operation sees the appropriate
    // after-state regardless of whether the operation succeeded or failed.
    $config.states.splitChunk = function splitChunk(db, collName, connCache) {

        var dbName = db.getName();
        var ns = db[collName].getFullName();
        var config = ChunkHelper.getPrimary(connCache.config);

        // Choose a random chunk in our partition to split.
        var chunk = this.getRandomChunkInPartition(config);

        // Save the number of documents found in this chunk's range before the splitChunk
        // operation. This will be used to verify that the same number of documents in that
        // range are found after the splitChunk.
        // Choose the mongos randomly to distribute load.
        var numDocsBefore = ChunkHelper.getNumDocs(
            ChunkHelper.getRandomMongos(connCache.mongos), ns, chunk.min._id, chunk.max._id);
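
        // A minimal sketch of what a getNumDocs helper presumably does (the real
        // implementation lives in chunk_helper.js; the query shape below is an
        // assumption):
        //
        //     ChunkHelper.getNumDocs = function(conn, ns, lower, upper) {
        //         return conn.getCollection(ns).find({_id: {$gte: lower, $lt: upper}}).itcount();
        //     };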

        // Save the number of chunks before the splitChunk operation. This will be used
        // to verify that the number of chunks after a successful splitChunk increases
        // by one, or after a failed splitChunk stays the same.
        var numChunksBefore = ChunkHelper.getNumChunks(
            config, ns, this.partition.chunkLower, this.partition.chunkUpper);
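
        // Likewise, getNumChunks is presumably a count over the config servers'
        // chunks metadata (a hedged sketch, not the authoritative implementation):
        //
        //     ChunkHelper.getNumChunks = function(conn, ns, lower, upper) {
        //         var query = {ns: ns, 'min._id': {$gte: lower}, 'max._id': {$lte: upper}};
        //         return conn.getDB('config').chunks.find(query).itcount();
        //     };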

        // Use chunk_helper.js's splitChunk wrapper to tolerate acceptable failures
        // and to use a limited number of retries with exponential backoff.
        var bounds = [{_id: chunk.min._id}, {_id: chunk.max._id}];
        var splitChunkRes = ChunkHelper.splitChunkWithBounds(db, collName, bounds);
        var msgBase = 'Result of splitChunk operation: ' + tojson(splitChunkRes);
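
        // splitChunkWithBounds is assumed to wrap the split admin command in a
        // bounded retry loop along these lines (a sketch; isRetryable, MAX_RETRIES,
        // and BACKOFF_MS are hypothetical names, not chunk_helper.js identifiers):
        //
        //     var res;
        //     for (var attempt = 0; attempt < MAX_RETRIES; attempt++) {
        //         res = db.adminCommand({split: ns, bounds: bounds});
        //         if (res.ok || !isRetryable(res)) {
        //             break;
        //         }
        //         sleep(BACKOFF_MS * Math.pow(2, attempt));  // exponential backoff
        //     }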

        // Regardless of whether the splitChunk operation succeeded or failed,
        // verify that the shard the original chunk was on returns all data for the chunk.
        var shardPrimary = ChunkHelper.getPrimary(connCache.shards[chunk.shard]);
        var shardNumDocsAfter =
            ChunkHelper.getNumDocs(shardPrimary, ns, chunk.min._id, chunk.max._id);
        var msg = 'Shard does not have same number of documents after splitChunk.\n' + msgBase;
        assertWhenOwnColl.eq(shardNumDocsAfter, numDocsBefore, msg);

        // Verify that all config servers have the correct after-state.
        // (see comments below for specifics).
        for (var conn of connCache.config) {
            var res = conn.adminCommand({isMaster: 1});
            assertAlways.commandWorked(res);
            if (res.ismaster) {
                // If the splitChunk operation succeeded, verify that there are now
                // two chunks between the old chunk's lower and upper bounds.
                // If the operation failed, verify that there is still only one chunk
                // between the old chunk's lower and upper bounds.
                var numChunksBetweenOldChunksBounds =
                    ChunkHelper.getNumChunks(conn, ns, chunk.min._id, chunk.max._id);
                if (splitChunkRes.ok) {
                    msg = 'splitChunk succeeded but the config does not see exactly 2 chunks ' +
                        'between the chunk bounds.\n' + msgBase;
                    assertWhenOwnColl.eq(numChunksBetweenOldChunksBounds, 2, msg);
                } else {
                    msg = 'splitChunk failed but the config does not see exactly 1 chunk between ' +
                        'the chunk bounds.\n' + msgBase;
                    assertWhenOwnColl.eq(numChunksBetweenOldChunksBounds, 1, msg);
                }

                // If the splitChunk operation succeeded, verify that the total number
                // of chunks in our partition has increased by 1. If it failed, verify
                // that it has stayed the same.
                var numChunksAfter = ChunkHelper.getNumChunks(
                    conn, ns, this.partition.chunkLower, this.partition.chunkUpper);
                if (splitChunkRes.ok) {
                    msg = 'splitChunk succeeded but the config does not see exactly 1 more ' +
                        'chunk in our partition.\n' + msgBase;
                    assertWhenOwnColl.eq(numChunksAfter, numChunksBefore + 1, msg);
                } else {
                    msg = 'splitChunk failed but the config does not see the same number ' +
                        'of chunks in our partition.\n' + msgBase;
                    assertWhenOwnColl.eq(numChunksAfter, numChunksBefore, msg);
                }
            }
        }

        // Verify that all mongos processes see the correct after-state on the shards and configs.
        // (see comments below for specifics).
        for (var mongos of connCache.mongos) {
            // Regardless of whether the splitChunk operation succeeded or failed, verify that
            // each mongos sees as many documents in the chunk's range after the split as there
            // were before.
            var numDocsAfter = ChunkHelper.getNumDocs(mongos, ns, chunk.min._id, chunk.max._id);

            msg = 'Mongos does not see same number of documents after splitChunk.\n' + msgBase;
            assertWhenOwnColl.eq(numDocsAfter, numDocsBefore, msg);

            // Regardless of whether the splitChunk operation succeeded or failed,
            // verify that each mongos sees all data in the original chunk's
            // range only on the shard the original chunk was on.
            var shardsForChunk =
                ChunkHelper.getShardsForRange(mongos, ns, chunk.min._id, chunk.max._id);
            msg = 'Mongos does not see exactly 1 shard for chunk after splitChunk.\n' + msgBase +
                '\n' +
                'Mongos find().explain() results for chunk: ' + tojson(shardsForChunk);
            assertWhenOwnColl.eq(shardsForChunk.shards.length, 1, msg);

            msg = 'Mongos sees different shard for chunk than chunk does after splitChunk.\n' +
                msgBase + '\n' +
                'Mongos find().explain() results for chunk: ' + tojson(shardsForChunk);
            assertWhenOwnColl.eq(shardsForChunk.shards[0], chunk.shard, msg);
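
            // getShardsForRange presumably derives the owning shards from a
            // find().explain() over the range (consistent with the messages above);
            // a rough sketch, with the exact explain parsing being an assumption:
            //
            //     var explain = mongos.getCollection(ns)
            //                       .find({_id: {$gte: min, $lt: max}})
            //                       .explain();
            //     // ...collect the distinct shard names reported under
            //     // explain.queryPlanner.winningPlan.shards...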

            // If the splitChunk operation succeeded, verify that the mongos sees two chunks between
            // the old chunk's lower and upper bounds. If the operation failed, verify that the
            // mongos still only sees one chunk between the old chunk's lower and upper bounds.
            var numChunksBetweenOldChunksBounds =
                ChunkHelper.getNumChunks(mongos, ns, chunk.min._id, chunk.max._id);
            if (splitChunkRes.ok) {
                msg = 'splitChunk succeeded but the mongos does not see exactly 2 chunks ' +
                    'between the chunk bounds.\n' + msgBase;
                assertWhenOwnColl.eq(numChunksBetweenOldChunksBounds, 2, msg);
            } else {
                msg = 'splitChunk failed but the mongos does not see exactly 1 chunk between ' +
                    'the chunk bounds.\n' + msgBase;
                assertWhenOwnColl.eq(numChunksBetweenOldChunksBounds, 1, msg);
            }

            // If the splitChunk operation succeeded, verify that the total number of chunks in our
            // partition has increased by 1. If it failed, verify that it has stayed the same.
            var numChunksAfter = ChunkHelper.getNumChunks(
                mongos, ns, this.partition.chunkLower, this.partition.chunkUpper);
            if (splitChunkRes.ok) {
                msg = 'splitChunk succeeded but the mongos does not see exactly 1 more ' +
                    'chunk in our partition.\n' + msgBase;
                assertWhenOwnColl.eq(numChunksAfter, numChunksBefore + 1, msg);
            } else {
                msg = 'splitChunk failed but the mongos does not see the same number ' +
                    'of chunks in our partition.\n' + msgBase;
                assertWhenOwnColl.eq(numChunksAfter, numChunksBefore, msg);
            }
        }
    };

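    // FSM transition table: 'init' always moves to 'splitChunk', which then
    // loops back to itself, so each thread performs one split attempt per
    // iteration.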
    $config.transitions = {init: {splitChunk: 1}, splitChunk: {splitChunk: 1}};

    return $config;
});