summaryrefslogtreecommitdiff
path: root/jstests/sharding/migration_ignore_interrupts.js
blob: 04b92088b3b1216e077afcb8fed95d9238c533c3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
//
// These tests validate that a migration with session IDs between two shards is protected
// against disruptive migration commands from a third shard. Several scenarios are tested.
// For more information on migration session IDs, see SERVER-20290.
//

load('./jstests/libs/chunk_manipulation_util.js');

(function() {
    "use strict";

    // Kills any in-progress moveChunk operation visible via currentOp() on the
    // given admin DB, leaving the migration recipient deliberately unaware of
    // the abort. Asserts that at least one such operation was found and killed.
    function killRunningMoveChunk(adminDB) {
        var inProgressOps = adminDB.currentOp().inprog;
        var abortedMigration = false;
        for (var i = 0; i < inProgressOps.length; i++) {
            var op = inProgressOps[i];
            // Not every op document carries a 'query' field, so guard before
            // dereferencing it.
            if (op.query && op.query.moveChunk) {
                adminDB.killOp(op.opid);
                abortedMigration = true;
            }
        }
        assert.eq(true,
                  abortedMigration,
                  "Failed to abort migration, current running ops: " + tojson(inProgressOps));
    }

    ///////////////////////////////////////////////////////////////////////////////////////////////////
    // Starting setup
    ///////////////////////////////////////////////////////////////////////////////////////////////////

    // Shard0:
    //      coll1:     [0, 10) [10, 20) [20, 30)
    //      coll2:     [0, 10) [10, 20)
    // Shard1:
    // Shard2:

    var staticMongod1 = MongoRunner.runMongod({});  // For startParallelOps.
    var staticMongod2 = MongoRunner.runMongod({});  // For startParallelOps.

    // NOTE(review): only shards 0-2 are exercised below; the fourth shard
    // appears unused — presumably kept for parity with related tests.
    var st = new ShardingTest({shards: 4, mongos: 1});

    var mongos = st.s0, admin = mongos.getDB('admin'),
        shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
        ns1 = dbName + ".foo", coll1 = mongos.getCollection(ns1), ns2 = dbName + ".baz",
        coll2 = mongos.getCollection(ns2), shard0 = st.shard0, shard1 = st.shard1,
        shard2 = st.shard2, shard0Coll1 = shard0.getCollection(ns1),
        shard0Coll2 = shard0.getCollection(ns2), shard1Coll1 = shard1.getCollection(ns1),
        shard1Coll2 = shard1.getCollection(ns2), shard2Coll1 = shard2.getCollection(ns1),
        shard2Coll2 = shard2.getCollection(ns2);

    assert.commandWorked(admin.runCommand({enableSharding: dbName}));
    st.ensurePrimaryShard(dbName, shards[0]._id);

    // Shard both collections on {a: 1} and pre-split so each range above holds
    // exactly one chunk.
    assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
    assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
    assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 20}}));
    assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
    assert.commandWorked(admin.runCommand({split: ns2, middle: {a: 10}}));

    // One document per chunk, so chunk placement can be verified by counts.
    assert.writeOK(coll1.insert({a: 0}));
    assert.writeOK(coll1.insert({a: 10}));
    assert.writeOK(coll1.insert({a: 20}));
    assert.eq(3, shard0Coll1.count());
    assert.eq(3, coll1.count());
    assert.writeOK(coll2.insert({a: 0}));
    assert.writeOK(coll2.insert({a: 10}));
    assert.eq(2, shard0Coll2.count());
    assert.eq(2, coll2.count());

    ///////////////////////////////////////////////////////////////////////////////////////////////////
    //      1. When a migration is in process from shard0 to shard1 on coll1, shard2 is unable to
    //         start a migration with either shard in the following cases:
    //               1. coll1 shard2 to shard0 -- coll1 is already locked.
    //               2. coll1 shard2 to shard1 -- coll1 is already locked.
    //               3. coll1 shard1 to shard2 -- coll1 is already locked.
    //               4. coll2 shard2 to shard1 -- shard1 can't receive two chunks simultaneously.
    //               5. coll2 shard0 to shard2 -- shard0 can't send two chunks simultaneously.
    ///////////////////////////////////////////////////////////////////////////////////////////////////

    // Shard0:
    //      coll1:     [0, 10)
    //      coll2:     [0, 10)
    // Shard1:
    //      coll1:     [20, 30)
    // Shard2:
    //      coll1:     [10, 20)
    //      coll2:     [10, 20)

    assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[2]._id}));
    assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[2]._id}));
    assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[1]._id}));
    assert.eq(1, shard0Coll1.count());
    assert.eq(1, shard0Coll2.count());
    assert.eq(1, shard1Coll1.count());
    assert.eq(0, shard1Coll2.count());
    assert.eq(1, shard2Coll1.count());
    assert.eq(1, shard2Coll2.count());

    // Start a migration between shard0 and shard1 on coll1 and then pause it
    pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
    var joinMoveChunk1 = moveChunkParallel(
        staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
    waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);

    jsTest.log('Attempting to interrupt migration....');
    // Test 1.1
    assert.commandFailed(
        admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}),
        "(1.1) coll1 lock should have prevented simultaneous migrations in the collection.");
    // Test 1.2
    assert.commandFailed(
        admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[1]._id}),
        "(1.2) coll1 lock should have prevented simultaneous migrations in the collection.");
    // Test 1.3
    assert.commandFailed(
        admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[2]._id}),
        "(1.3) coll1 lock should have prevented simultaneous migrations in the collection.");
    // Test 1.4
    assert.commandFailed(
        admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[1]._id}),
        "(1.4) A shard should not be able to be the recipient of two ongoing migrations");
    // Test 1.5
    assert.commandFailed(
        admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[2]._id}),
        "(1.5) A shard should not be able to be the donor for two ongoing migrations.");

    // Finish migration
    unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
    assert.doesNotThrow(function() {
        joinMoveChunk1();
    });
    assert.eq(0, shard0Coll1.count());
    assert.eq(2, shard1Coll1.count());

    // Reset setup
    assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
    assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[0]._id}));
    assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}));
    assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[0]._id}));
    assert.eq(3, shard0Coll1.count());
    assert.eq(2, shard0Coll2.count());
    assert.eq(0, shard1Coll1.count());
    assert.eq(0, shard1Coll2.count());
    assert.eq(0, shard2Coll1.count());
    assert.eq(0, shard2Coll2.count());

    ///////////////////////////////////////////////////////////////////////////////////////////////////
    //      2. When a migration between shard0 and shard1 is about to enter the commit phase, a
    //         commit command from shard2 (different migration session ID) is rejected.
    ///////////////////////////////////////////////////////////////////////////////////////////////////

    // Shard0:
    //      coll1:     [0, 10) [10, 20) [20, 30)
    //      coll2:     [0, 10) [10, 20)
    // Shard1:
    // Shard2:

    // Start a migration between shard0 and shard1 on coll1, pause in steady state before commit
    pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
    joinMoveChunk1 = moveChunkParallel(
        staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
    waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);

    jsTest.log('Sending false commit command....');
    assert.commandFailed(
        shard2.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));

    jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
    var res = shard1.adminCommand('_recvChunkStatus');
    assert.commandWorked(res);
    assert.eq(true, res.state === "steady", "False commit command succeeded");

    // Finish migration
    unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
    assert.doesNotThrow(function() {
        joinMoveChunk1();
    });
    assert.eq(2, shard0Coll1.count());
    assert.eq(1, shard1Coll1.count());

    // Reset setup
    assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
    assert.eq(3, shard0Coll1.count());
    assert.eq(2, shard0Coll2.count());
    assert.eq(0, shard1Coll1.count());
    assert.eq(0, shard1Coll2.count());
    assert.eq(0, shard2Coll1.count());
    assert.eq(0, shard2Coll2.count());

    ///////////////////////////////////////////////////////////////////////////////////////////////////
    //      3. If a donor aborts a migration to a recipient, the recipient does not realize the
    //         migration has been aborted, and the donor moves on to a new migration, the original
    //         recipient will then fail to clone documents from the donor.
    ///////////////////////////////////////////////////////////////////////////////////////////////////

    // Shard0:
    //      coll1:     [0, 10) [10, 20) [20, 30)
    //      coll2:     [0, 10) [10, 20)
    // Shard1:
    // Shard2:

    // Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
    // check
    pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
    pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
    joinMoveChunk1 = moveChunkParallel(
        staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
    waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);

    // Abort migration on donor side, recipient is unaware
    killRunningMoveChunk(admin);
    unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
    assert.throws(function() {
        joinMoveChunk1();
    });

    // Start coll2 migration to shard2, pause recipient after delete step
    pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
    var joinMoveChunk2 = moveChunkParallel(
        staticMongod2, st.s0.host, {a: 0}, null, coll2.getFullName(), shards[2]._id);
    waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);

    jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
    unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
    assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
    assert.eq(0, shard1Coll1.count(), "shard1 cloned documents despite donor migration abortion");

    jsTest.log('Finishing coll2 migration, which should succeed....');
    unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
    assert.doesNotThrow(function() {
        joinMoveChunk2();
    });
    assert.eq(1,
              shard0Coll2.count(),
              "donor shard0 failed to complete a migration " + "after aborting a prior migration");
    assert.eq(1, shard2Coll2.count(), "shard2 failed to complete migration");

    // Reset setup
    assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[0]._id}));
    assert.eq(3, shard0Coll1.count());
    assert.eq(2, shard0Coll2.count());
    assert.eq(0, shard1Coll1.count());
    assert.eq(0, shard1Coll2.count());
    assert.eq(0, shard2Coll1.count());
    assert.eq(0, shard2Coll2.count());

    ///////////////////////////////////////////////////////////////////////////////////////////////////
    //      4. If a donor aborts a migration to a recipient, the recipient does not realize the
    //         migration has been aborted, and the donor moves on to a new migration, the original
    //         recipient will then fail to retrieve transferMods from the donor's xfermods log.
    ///////////////////////////////////////////////////////////////////////////////////////////////////

    // Shard0:
    //      coll1:     [0, 10) [10, 20) [20, 30)
    //      coll2:     [0, 10) [10, 20)
    // Shard1:
    // Shard2:

    // Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
    pauseMigrateAtStep(shard1, migrateStepNames.cloned);
    pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
    joinMoveChunk1 = moveChunkParallel(
        staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
    waitForMigrateStep(shard1, migrateStepNames.cloned);

    // Abort migration on donor side, recipient is unaware
    killRunningMoveChunk(admin);
    unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
    assert.throws(function() {
        joinMoveChunk1();
    });

    // Start coll2 migration to shard2, pause recipient after cloning step
    pauseMigrateAtStep(shard2, migrateStepNames.cloned);
    joinMoveChunk2 = moveChunkParallel(
        staticMongod2, st.s0.host, {a: 0}, null, coll2.getFullName(), shards[2]._id);
    waitForMigrateStep(shard2, migrateStepNames.cloned);

    // Populate donor (shard0) xfermods log.
    assert.writeOK(coll2.insert({a: 1}));
    assert.writeOK(coll2.insert({a: 2}));
    assert.eq(4, coll2.count(), "Failed to insert documents into coll2");
    assert.eq(4, shard0Coll2.count());

    jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
    unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
    assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
    assert.eq(1,
              shard1Coll1.count(),
              "shard1 accessed the xfermods log despite " + "donor migration abortion");

    jsTest.log('Finishing coll2 migration, which should succeed....');
    unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
    assert.doesNotThrow(function() {
        joinMoveChunk2();
    });
    assert.eq(1,
              shard0Coll2.count(),
              "donor shard0 failed to complete a migration " + "after aborting a prior migration");
    assert.eq(3, shard2Coll2.count(), "shard2 failed to complete migration");

    st.stop();

    // Shut down the standalone mongods that hosted the parallel shells so they
    // are not leaked past the end of the test.
    MongoRunner.stopMongod(staticMongod1);
    MongoRunner.stopMongod(staticMongod2);

})();