path: root/jstests/sharding/orphans_are_removed_after_failover.js
/**
 * Ensure that orphaned documents are deleted after a chunk migration, even when the donor
 * shard's primary steps down before the range deleter has run.
 *
 * 1. Create a sharded collection with three chunks, leaving two chunks on Shard A and one on
 *    Shard B
 * 2. Pause range deletion on the primary of Shard A
 * 3. Migrate a chunk from Shard A to Shard B
 * 4. Cause a step down on Shard A
 * 5. Connect directly to the new primary of Shard A and verify that eventually no documents remain
 *    from the chunk that was migrated away
 */

(function() {
"use strict";

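// Skip the hook that checks collection UUIDs are consistent across the cluster at shutdown.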
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;

const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;

// Create 2 shards with 3 nodes each so that shard0 can elect a new primary after the step down.
let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});

// Create a sharded collection with three chunks:
//     [-inf, -10), [-10, 10), [10, inf)
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: -10}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 10}}));

// Move chunk [10, inf) to shard1.
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 10}, to: st.shard1.shardName}));

let testDB = st.s.getDB(dbName);
let testColl = testDB.getCollection(collName);

// Insert 20 docs in the first chunk.
for (let i = -100; i < -80; i++) {
    assert.commandWorked(testColl.insert({x: i}));
}

// Insert 10 docs in the second chunk.
for (let i = -5; i < 5; i++) {
    assert.commandWorked(testColl.insert({x: i}));
}

// Insert 10 docs in the third chunk.
for (let i = 15; i < 25; i++) {
    assert.commandWorked(testColl.insert({x: i}));
}

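// Shard 0 initially owns chunks [-inf, -10) and [-10, 10) (20 + 10 docs) and shard 1 owns
// [10, inf) (10 docs). After chunk [-10, 10) is migrated, each shard should own 20 docs.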
const expectedNumDocsTotal = 40;
const expectedNumDocsShard0Before = 30;
const expectedNumDocsShard1Before = 10;
const expectedNumDocsShard0After = 20;
const expectedNumDocsShard1After = 20;

// Verify total count.
assert.eq(testColl.find().itcount(), expectedNumDocsTotal);

// Verify shard0 count.
let shard0Coll = st.shard0.getCollection(ns);
assert.eq(shard0Coll.find().itcount(), expectedNumDocsShard0Before);

// Verify shard1 count.
let shard1Coll = st.shard1.getCollection(ns);
assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1Before);

// Pause range deletion.
let originalShard0Primary = st.rs0.getPrimary();
assert.commandWorked(originalShard0Primary.adminCommand(
    {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'}));

// Move chunk [-10, 10) to shard1.
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: -10}, to: st.shard1.shardName}));

// Verify total count.
assert.eq(testColl.find().itcount(), expectedNumDocsTotal);

// Since the range deleter is paused, we expect the orphaned documents to still be on shard 0,
// so the document count should be the same as it was before the migration.
assert.eq(shard0Coll.find().itcount(), expectedNumDocsShard0Before);

// Verify that shard1's count now includes the docs from the moved chunk.
assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1After);

// Step down current primary.
assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: true}));

// Allow the range deleter to run.
assert.commandWorked(originalShard0Primary.adminCommand(
    {configureFailPoint: 'suspendRangeDeletion', mode: 'off'}));

// Connect to new primary for shard0.
let shard0Primary = st.rs0.getPrimary();
let shard0PrimaryColl = shard0Primary.getCollection(ns);

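// The pending range deletion task is persisted, so the new primary (which never had the
// failpoint enabled) resumes it and removes the documents from the migrated range.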
// Verify that orphans are deleted on the new primary.
assert.soon(() => {
    return shard0PrimaryColl.find().itcount() === expectedNumDocsShard0After;
});

st.stop();
})();