// jstests/sharding/migration_waits_for_majority_commit.js
/**
 * This test is meant to test that a migration will correctly wait for the majority commit point
 * when there are no transfer mod writes (SERVER-42783).
 * @tags: [requires_find_command, requires_majority_read_concern]
 */

(function() {
"use strict";

load('./jstests/libs/chunk_manipulation_util.js');

// Set up a sharded cluster with two shards, two chunks, and one document in one of the chunks.
// Two-node replica sets per shard are required so that stopping replication on the recipient's
// secondary can hold back the recipient's majority commit point.
const st = new ShardingTest({shards: 2, rs: {nodes: 2}, config: 1});
const testDB = st.s.getDB("test");

assert.commandWorked(testDB.foo.insert({_id: 1}, {writeConcern: {w: "majority"}}));

st.ensurePrimaryShard("test", st.shard0.shardName);
assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: "test.foo", middle: {_id: 0}}));

// The document is in the majority committed snapshot.
assert.eq(1, testDB.foo.find().readConcern("majority").itcount());

// Advance a migration to the beginning of the cloning phase (migrate step 2 on the recipient,
// st.rs1's primary).
pauseMigrateAtStep(st.rs1.getPrimary(), 2);

// For startParallelOps to write its state
let staticMongod = MongoRunner.runMongod({});

// Kick off a migration of the chunk containing {_id: 1} to shard1. The migration is expected to
// abort, hence expectSuccess = false.
let awaitMigration = moveChunkParallel(staticMongod,
                                       st.s.host,
                                       {_id: 1},
                                       null,
                                       "test.foo",
                                       st.shard1.shardName,
                                       false /* expectSuccess */);

// Wait for the migration to reach the failpoint and allow any writes to become majority committed
// before pausing replication.
waitForMigrateStep(st.rs1.getPrimary(), 2);
st.rs1.awaitLastOpCommitted();

// Disable replication on the recipient shard's secondary node, so the recipient shard's majority
// commit point cannot advance.
const destinationSec = st.rs1.getSecondary();
assert.commandWorked(
    destinationSec.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "alwaysOn"}),
    "failed to enable fail point on secondary");

// Allow the migration to begin cloning.
unpauseMigrateAtStep(st.rs1.getPrimary(), 2);

// The migration should fail to commit without being able to advance the majority commit point.
if (jsTestOptions().mongosBinVersion === "last-stable") {
    // Pre-4.4 binaries: moveChunk itself returns the failure, so the parallel op can be joined
    // before replication is re-enabled.
    awaitMigration();

    assert.commandWorked(
        destinationSec.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}),
        "failed to disable fail point on secondary");
} else {
    // In FCV 4.4, check the migration coordinator document, because the moveChunk command itself
    // will hang on trying to remove the recipient's range deletion entry with majority writeConcern
    // until replication is re-enabled on the recipient.
    assert.soon(() => {
        return st.rs0.getPrimary().getDB("config").getCollection("migrationCoordinators").findOne({
            nss: "test.foo",
            "range.min._id": 0,
            "range.max._id": MaxKey,
            decision: "aborted",
        }) != null;
    });

    // Re-enable replication so the hung moveChunk can finish and the parallel op can be joined.
    assert.commandWorked(
        destinationSec.adminCommand({configureFailPoint: "rsSyncApplyStop", mode: "off"}),
        "failed to disable fail point on secondary");

    awaitMigration();
}

st.stop();
MongoRunner.stopMongod(staticMongod);
})();