/**
 * This test verifies that a migration correctly waits for the majority commit point when there
 * are no transfer mod writes (SERVER-42783).
 * @tags: [
 *   requires_majority_read_concern,
 * ]
 */

(function() {
"use strict";

load('./jstests/libs/chunk_manipulation_util.js');
load("jstests/libs/write_concern_util.js");

// Set up a sharded cluster with two shards, two chunks, and one document in one of the chunks.
const st = new ShardingTest({shards: 2, rs: {nodes: 2}});
const testDB = st.s.getDB("test");

assert.commandWorked(testDB.foo.insert({_id: 1}, {writeConcern: {w: "majority"}}));

assert.commandWorked(st.s.adminCommand({enableSharding: "test"}));
st.ensurePrimaryShard("test", st.shard0.shardName);
assert.commandWorked(st.s.adminCommand({shardCollection: "test.foo", key: {_id: 1}}));
assert.commandWorked(st.s.adminCommand({split: "test.foo", middle: {_id: 0}}));
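// Splitting at {_id: 0} puts the inserted document {_id: 1} in the top chunk [0, MaxKey), which
// is the chunk that will be migrated.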
// The default WC is majority and stopServerReplication will prevent satisfying any majority writes.
assert.commandWorked(st.s.adminCommand(
    {setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: "majority"}}));

// The document is in the majority committed snapshot.
assert.eq(1, testDB.foo.find().readConcern("majority").itcount());

// Configure the recipient so the migration will pause at the beginning of the cloning phase.
pauseMigrateAtStep(st.rs1.getPrimary(), migrateStepNames.rangeDeletionTaskScheduled);

// Start a dedicated mongod for startParallelOps to write its state to.
let staticMongod = MongoRunner.runMongod({});

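// Start migrating the chunk containing {_id: 1} to shard1 in a parallel shell. The migration is
// expected to fail (expectSuccess: false), since the recipient will be unable to majority commit
// the cloned document while replication is stopped on its secondary.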
let awaitMigration = moveChunkParallel(staticMongod,
                                       st.s.host,
                                       {_id: 1},
                                       null,
                                       "test.foo",
                                       st.shard1.shardName,
                                       false /* expectSuccess */);

// Wait for the migration to reach the failpoint and allow any writes to become majority committed
// before pausing replication.
waitForMigrateStep(st.rs1.getPrimary(), migrateStepNames.rangeDeletionTaskScheduled);
st.rs1.awaitLastOpCommitted();

// Disable replication on the recipient shard's secondary node, so the recipient shard's majority
// commit point cannot advance.
const destinationSec = st.rs1.getSecondary();
stopServerReplication(destinationSec);

// Allow the migration to begin cloning.
unpauseMigrateAtStep(st.rs1.getPrimary(), migrateStepNames.rangeDeletionTaskScheduled);

// Verify via the migration coordinator document on the donor that the migration decision was
// "aborted". The moveChunk command itself will hang trying to remove the recipient's range
// deletion entry with majority writeConcern until replication is re-enabled on the recipient.
assert.soon(() => {
    return st.rs0.getPrimary().getDB("config").getCollection("migrationCoordinators").findOne({
        nss: "test.foo",
        "range.min._id": 0,
        "range.max._id": MaxKey,
        decision: "aborted",
    }) != null;
});

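// Re-enable replication so the recipient's majority commit point can advance, allowing the
// moveChunk command to finish its cleanup and return.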
restartServerReplication(destinationSec);

awaitMigration();

st.stop();
MongoRunner.stopMongod(staticMongod);
})();