jstests/sharding/migration_sets_fromMigrate_flag.js
//
// Tests whether the fromMigrate flag is correctly set during migrations.
//
// Tests:
//      #1 (delete op) fromMigrate is set when recipient shard deletes all documents locally
//         in the chunk range it is about to receive from the donor shard.
//      #2 (delete op) fromMigrate is set when the donor shard deletes documents that have
//         been migrated to another shard.
//      #3 (insert op) fromMigrate is set when the recipient shard receives chunk migration
//         data and inserts it.
//      #4 (update op) fromMigrate is set when an update occurs in the donor shard during
//         migration and is sent to the recipient via the transfer logs.
//      #5 fromMigrate is NOT set on the donor shard and IS set on the recipient shard when a
//         real delete op is performed within the chunk range during chunk migration.
//
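//
// For reference, the oplog entries this test inspects have roughly the following shape
// (illustrative only; real entries also carry fields such as ts and h):
//
//     {op: 'i', ns: 'testDB.foo', fromMigrate: true, o: {_id: 2}}   // migration insert
//     {op: 'd', ns: 'testDB.foo', fromMigrate: true, o: {_id: 2}}   // migration delete
//     {op: 'd', ns: 'testDB.foo', o: {_id: 4}}                      // real delete: no flag
//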

// This test inserts documents into a sharded collection by directly writing to the shards, so the
// collection is created on the non-primary shard with a shard-generated UUID rather than with a
// UUID propagated from the primary shard.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;

load('./jstests/libs/chunk_manipulation_util.js');
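
// chunk_manipulation_util.js provides the helpers used below: pauseMigrateAtStep(),
// waitForMigrateStep(), and unpauseMigrateAtStep() stall and release the recipient's migrate
// thread at a named step, while moveChunkParallel() runs moveChunk in the background and
// returns a join function.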

(function() {
    "use strict";

    var staticMongod = MongoRunner.runMongod({});  // For startParallelOps.

    /**
     * Start up a new sharded cluster and stop the balancer, which would otherwise interfere
     * with manual chunk management.
     */

    var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 3}});
    st.stopBalancer();

    var mongos = st.s0, admin = mongos.getDB('admin'), dbName = "testDB", ns = dbName + ".foo",
        coll = mongos.getCollection(ns), donor = st.shard0, recipient = st.shard1,
        donorColl = donor.getCollection(ns), recipientColl = recipient.getCollection(ns),
        donorLocal = donor.getDB('local'), recipientLocal = recipient.getDB('local');

    // Two chunks
    // Donor:     [0, 2) [2, 5)
    // Recipient:
    jsTest.log('Enable sharding of the collection and pre-split into two chunks...');

    assert.commandWorked(admin.runCommand({enableSharding: dbName}));
    st.ensurePrimaryShard(dbName, st.shard0.shardName);
    assert.commandWorked(donorColl.createIndex({_id: 1}));
    assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
    assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
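
    // The split at {_id: 2} leaves two chunks on the donor: [MinKey, 2) and [2, MaxKey). The
    // ranges [0, 2) and [2, 5) shown in comments refer to the documents inserted below, not
    // the actual chunk bounds.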

    // 6 documents total:
    //        donor: 2 in the first chunk, 3 in the second.
    //    recipient: 1 document whose shard key overlaps with a doc in the donor's second chunk.
    jsTest.log('Inserting 5 docs into donor shard, 1 doc into the recipient shard...');

    for (var i = 0; i < 5; ++i)
        assert.writeOK(coll.insert({_id: i}));
    assert.eq(5, donorColl.count());

    assert.writeOK(recipientColl.insert({_id: 2}));
    assert.eq(1, recipientColl.count());

    /**
     * Set failpoint: recipient will pause migration after cloning chunk data from donor,
     * before checking transfer mods log on donor.
     */

    jsTest.log('Setting recipient failpoint: cloned...');
    pauseMigrateAtStep(recipient, migrateStepNames.cloned);
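
    // Note: at the time of writing, the chunk_manipulation_util.js helpers implement this by
    // enabling a migrateThreadHangAtStep<N> failpoint on the recipient, so the migrate thread
    // blocks after the 'cloned' step until unpauseMigrateAtStep() is called below.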

    /**
     * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
     */

    // Donor:     [0, 2)
    // Recipient:    [2, 5)
    jsTest.log('Starting chunk migration, pause after cloning...');

    var joinMoveChunk = moveChunkParallel(
        staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), st.shard1.shardName);

    /**
     * Wait for recipient to finish cloning.
     * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
     * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
     */

    waitForMigrateStep(recipient, migrateStepNames.cloned);

    jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');

    assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
    assert.writeOK(coll.remove({_id: 4}));
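
    // Since the recipient has already cloned the chunk's documents, both writes above are
    // recorded in the donor's transfer mods log and replayed on the recipient once the
    // migration resumes; that replay is what tests #4 and #5 check in the recipient's oplog.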

    /**
     * Finish migration. Unpause recipient migration, wait for it to collect
     * the transfer mods log from donor and finish migration.
     */

    jsTest.log('Continuing and finishing migration...');
    unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
    joinMoveChunk();

    /**
     * Check that the documents are where they should be: 2 docs in the donor's chunk, 2 docs in
     * the recipient's chunk (the third doc in the migrated chunk was deleted on the donor shard
     * during migration).
     */

    jsTest.log('Checking that documents are on the shards they should be...');

    assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
    assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
    assert.eq(4, coll.count(), "Collection total is not 4!");
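
    // A stricter check (illustrative; not required by the fromMigrate assertions below) could
    // also verify the forwarded update's contents on the recipient, e.g.:
    //
    //     assert.eq("updated doc", recipientColl.findOne({_id: 3}).a);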

    /**
     * Check that the fromMigrate flag has been set correctly in the donor and recipient oplogs.
     */

    jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');

    var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
    assert.eq(1,
              donorOplogRes,
              "fromMigrate flag wasn't set on the donor shard's oplog for " +
                  "migrating delete op on {_id: 2}! Test #2 failed.");

    donorOplogRes =
        donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
    assert.eq(1,
              donorOplogRes,
              "Real delete of {_id: 4} on donor shard incorrectly set the " +
                  "fromMigrate flag in the oplog! Test #5 failed.");

    var recipientOplogRes =
        recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
    assert.eq(1,
              recipientOplogRes,
              "fromMigrate flag wasn't set on the recipient shard's " +
                  "oplog for migrating insert op on {_id: 2}! Test #3 failed.");

    recipientOplogRes =
        recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
    assert.eq(1,
              recipientOplogRes,
              "fromMigrate flag wasn't set on the recipient shard's " +
                  "oplog for delete op on the old {_id: 2} that overlapped " +
                  "with the chunk about to be copied! Test #1 failed.");

    recipientOplogRes =
        recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
    assert.eq(1,
              recipientOplogRes,
              "fromMigrate flag wasn't set on the recipient shard's " +
                  "oplog for update op on {_id: 3}! Test #4 failed.");

    recipientOplogRes =
        recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
    assert.eq(1,
              recipientOplogRes,
              "fromMigrate flag wasn't set on the recipient shard's " +
                  "oplog for delete op on {_id: 4} that occurred during " +
                  "migration! Test #5 failed.");

    jsTest.log('DONE!');
    st.stop();

})();