path: root/jstests/sharding/resubmit_rangedeletions_on_stepup.js
/**
 * Ensure that range deletion tasks persisted in config.rangeDeletions are resubmitted on
 * step up, so that orphaned documents are deleted, unless the task is marked pending.
 * @tags: [multiversion_incompatible]
 */

(function() {
"use strict";

load("jstests/libs/uuid_util.js");

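// Skip the cluster-wide UUID consistency check; this test writes documents
// directly to shard-local collections.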
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;

const dbName = "test";
const collName = "foo";
const ns = dbName + "." + collName;
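
// Shards persist range deletion tasks in config.rangeDeletions; on step up, a
// new primary resubmits any task that is not marked pending.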
const rangeDeletionNs = "config.rangeDeletions";

function setup() {
    // Create two shards, each backed by a 3-node replica set.
    let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}});

    // Create a sharded collection with two chunks: [-inf, 50) and [50, inf).
    assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
    assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName}));
    assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
    assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 50}}));

    return st;
}

// Writes a range deletion task for the [{x: 50}, {x: MaxKey}) range directly to
// config.rangeDeletions on the given shard. If `pending` is truthy, the task is
// marked pending and should be ignored when the primary steps up.
function writeRangeDeletionTask(collectionUuid, shardConn, pending) {
    let deletionTask = {
        _id: UUID(),
        nss: ns,
        collectionUuid: collectionUuid,
        donorShardId: "unused",
        range: {min: {x: 50}, max: {x: MaxKey}},
        whenToClean: "now"
    };

    if (pending)
        deletionTask.pending = true;

    let deletionsColl = shardConn.getCollection(rangeDeletionNs);

    // Insert the task document directly into the range deletions collection.
    assert.commandWorked(deletionsColl.insert(deletionTask));
}

(() => {
    jsTestLog(
        "Test normal case where the pending field has been removed and the orphans are deleted");
    let st = setup();

    let testDB = st.s.getDB(dbName);
    let testColl = testDB.foo;

    // Move chunk [50, inf) to shard1.
    assert.commandWorked(st.s.adminCommand(
        {moveChunk: ns, find: {x: 50}, to: st.shard1.shardName, _waitForDelete: true}));

    let shard0Coll = st.shard0.getCollection(ns);

    // Write some orphaned documents directly to shard0.
    let orphanCount = 0;
    for (let i = 70; i < 90; i++) {
        assert.commandWorked(shard0Coll.insert({x: i}));
        ++orphanCount;
    }

    // No documents were inserted through mongos, so the only documents on any
    // shard are the orphans written directly to shard0 above.
    const expectedNumDocsTotal = 0;
    const expectedNumDocsShard0 = 0;
    const expectedNumDocsShard1 = 0;

    // Verify total count.
    assert.eq(testColl.find().itcount(), expectedNumDocsTotal);

    // Verify shard0 count includes orphans.
    assert.eq(shard0Coll.find().itcount(), expectedNumDocsShard0 + orphanCount);

    // Verify shard1 count.
    let shard1Coll = st.shard1.getCollection(ns);
    assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1);

    const collectionUuid = getUUIDFromConfigCollections(st.s, ns);

    // Leave `pending` unset so the task is eligible for resubmission on step up.
    writeRangeDeletionTask(collectionUuid, st.shard0);

    // Step down the current primary of shard0.
    let originalShard0Primary = st.rs0.getPrimary();
    assert.commandWorked(originalShard0Primary.adminCommand({replSetStepDown: 60, force: true}));

    // Connect to new primary for shard0.
    let shard0Primary = st.rs0.getPrimary();
    let shard0PrimaryColl = shard0Primary.getCollection(ns);

    // Verify that orphans are deleted.
    assert.soon(() => {
        return shard0PrimaryColl.find().itcount() == expectedNumDocsShard0;
    });
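
    // Hedged follow-up check, assuming the range deleter also removes a task's
    // document from config.rangeDeletions once the deletion completes.
    assert.soon(() => {
        return shard0Primary.getCollection(rangeDeletionNs).find().itcount() === 0;
    });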

    st.stop();
})();

(() => {
    jsTestLog("Test the case where pending: true and the orphans are NOT deleted");
    let st = setup();

    let testDB = st.s.getDB(dbName);
    let testColl = testDB.foo;

    // Move chunk [50, inf) to shard1.
    assert.commandWorked(st.s.adminCommand(
        {moveChunk: ns, find: {x: 50}, to: st.shard1.shardName, _waitForDelete: true}));

    let shard0Coll = st.shard0.getCollection(ns);

    // Write some orphaned documents directly to shard0.
    let orphanCount = 0;
    for (let i = 70; i < 90; i++) {
        assert.commandWorked(shard0Coll.insert({x: i}));
        ++orphanCount;
    }

    const collectionUuid = getUUIDFromConfigCollections(st.s, ns);

    // Mark the task as pending so it must NOT be resubmitted on step up.
    writeRangeDeletionTask(collectionUuid, st.shard0, true /* pending */);

    // As in the previous case, no documents were inserted through mongos.
    const expectedNumDocsTotal = 0;
    const expectedNumDocsShard0 = 0;
    const expectedNumDocsShard1 = 0;

    // Verify total count.
    assert.eq(testColl.find().itcount(), expectedNumDocsTotal);

    // Verify shard0 count includes orphans.
    assert.eq(shard0Coll.find().itcount(), expectedNumDocsShard0 + orphanCount);

    // Verify shard1 count.
    let shard1Coll = st.shard1.getCollection(ns);
    assert.eq(shard1Coll.find().itcount(), expectedNumDocsShard1);

    // Step down the current primary of shard0.
    let originalShard0Primary = st.rs0.getPrimary();
    assert.commandWorked(originalShard0Primary.adminCommand({replSetStepDown: 60, force: true}));

    // Connect to new primary for shard0.
    let shard0Primary = st.rs0.getPrimary();
    let shard0PrimaryColl = shard0Primary.getCollection(ns);

    // Verify that orphans are NOT deleted.
    assert.eq(shard0PrimaryColl.find().itcount(), expectedNumDocsShard0 + orphanCount);
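
    // Hedged companion check, assuming a pending task is left in place in
    // config.rangeDeletions rather than being processed on step up.
    assert.eq(shard0Primary.getCollection(rangeDeletionNs).find({pending: true}).itcount(), 1);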

    st.stop();
})();
})();