// jstests/sharding/resubmit_rangedeletions_on_stepup.js
/**
 * Ensure that orphaned documents are submitted for deletion on step up.
 * @tags: [multiversion_incompatible]
 */
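// On step up, a shard primary scans config.rangeDeletions and resubmits any
// range deletion task that is not marked `pending: true`; pending tasks belong
// to migrations that have not yet committed and must be left untouched. The
// two cases below exercise both sides of that behavior.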

(function() {
"use strict";

load("jstests/libs/uuid_util.js");

TestData.skipCheckingUUIDsConsistentAcrossCluster = true;

const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
const rangeDeletionNs = "config.rangeDeletions";

function setup() {
    // Create 2 shards, each backed by a 3-node replica set.
    let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});

    // Create a sharded collection with two chunks: [-inf, 50) and [50, inf).
    assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
    assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
    assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
    assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 50}}));

    return st;
}

// Test the normal case where the pending field has been removed and the orphans are deleted.
(() => {
    let st = setup();

    let testDB = st.s.getDB(dbName);
    let testColl = testDB.foo;

    // Insert documents spanning both chunks.
    for (let i = 0; i < 100; i++) {
        assert.commandWorked(testColl.insert({x: i}));
    }

    // Pause range deletion.
    let originalShard0Primary = st.rs0.getPrimary();
    assert.commandWorked(originalShard0Primary.adminCommand(
        {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'}));
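    // With the failpoint active, the post-migration range deletion on the
    // donor is suspended, so the documents in [50, MaxKey) remain on shard0
    // as orphans after the moveChunk below.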

    // Move chunk [50, inf) to shard1.
    assert.commandWorked(
        st.s.adminCommand({moveChunk: ns, find: {x: 50}, to: st.shard1.shardName}));

    const collectionUuid = getUUIDFromConfigCollections(st.s, ns);

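    // Build a range deletion task for the donated range. It carries no
    // `pending` field, so the new primary should submit it for processing
    // on step up.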
    let deletionTask = {
        _id: UUID(),
        nss: ns,
        collectionUuid: collectionUuid,
        donorShardId: "unused",
        range: {min: {x: 50}, max: {x: MaxKey}},
        whenToClean: "now"
    };

    let deletionsColl = st.shard0.getCollection(rangeDeletionNs);

    // Persist the task in the shard's range deletions collection.
    assert.commandWorked(deletionsColl.insert(deletionTask));

    const expectedNumDocsTotal = 100;
    const expectedNumDocsShard0 = 50;
    const expectedNumDocsShard1 = 50;

    // Verify total count.
    assert.eq(testColl.find().itcount(), expectedNumDocsTotal);

    // Verify shard0 count includes orphans.
    let shard0Coll = st.shard0.getCollection(ns);
    assert.eq(shard0Coll.find().itcount(), expectedNumDocsShard0 + expectedNumDocsShard1);

    // Verify shard1 count.
    let shard1Coll = st.shard1.getCollection(ns);
    assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1);

    // Step down the current primary to trigger a step up on another node.
    assert.commandWorked(originalShard0Primary.adminCommand({replSetStepDown: 60, force: true}));

    // Connect to new primary for shard0.
    let shard0Primary = st.rs0.getPrimary();
    let shard0PrimaryColl = shard0Primary.getCollection(ns);

    // Verify that orphans are deleted.
    assert.soon(() => {
        return shard0PrimaryColl.find().itcount() == expectedNumDocsShard0;
    });

    st.stop();
})();

// Test the case where the task is still pending (pending: true) and the orphans are NOT deleted.
(() => {
    let st = setup();

    let testDB = st.s.getDB(dbName);
    let testColl = testDB.foo;

    // Insert documents spanning both chunks.
    for (let i = 0; i < 100; i++) {
        assert.commandWorked(testColl.insert({x: i}));
    }

    // Pause range deletion.
    let originalShard0Primary = st.rs0.getPrimary();
    assert.commandWorked(originalShard0Primary.adminCommand(
        {configureFailPoint: 'suspendRangeDeletion', mode: 'alwaysOn'}));

    // Move chunk [50, inf) to shard1.
    assert.commandWorked(
        st.s.adminCommand({moveChunk: ns, find: {x: 50}, to: st.shard1.shardName}));

    const collectionUuid = getUUIDFromConfigCollections(st.s, ns);

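    // Build the same range deletion task, this time marked `pending: true`,
    // meaning its migration has not yet committed; step up must NOT process it.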
    let deletionTask = {
        _id: UUID(),
        nss: ns,
        collectionUuid: collectionUuid,
        donorShardId: "unused",
        pending: true,
        range: {min: {x: 50}, max: {x: MaxKey}},
        whenToClean: "now"
    };

    let deletionsColl = st.shard0.getCollection(rangeDeletionNs);

    // Persist the task in the shard's range deletions collection.
    assert.commandWorked(deletionsColl.insert(deletionTask));

    const expectedNumDocsTotal = 100;
    const expectedNumDocsShard0 = 50;
    const expectedNumDocsShard1 = 50;

    // Verify total count.
    assert.eq(testColl.find().itcount(), expectedNumDocsTotal);

    // Verify shard0 count includes orphans.
    let shard0Coll = st.shard0.getCollection(ns);
    assert.eq(shard0Coll.find().itcount(), expectedNumDocsShard0 + expectedNumDocsShard1);

    // Verify shard1 count.
    let shard1Coll = st.shard1.getCollection(ns);
    assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1);

    // Step down the current primary to trigger a step up on another node.
    assert.commandWorked(originalShard0Primary.adminCommand({replSetStepDown: 60, force: true}));

    // Connect to new primary for shard0.
    let shard0Primary = st.rs0.getPrimary();
    let shard0PrimaryColl = shard0Primary.getCollection(ns);

    // Verify that orphans are NOT deleted.
    assert.eq(shard0PrimaryColl.find().itcount(), expectedNumDocsShard0 + expectedNumDocsShard1);

    st.stop();
})();
})();