summaryrefslogtreecommitdiff
path: root/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js
blob: 7b57a357598563376ef58e4589a02433cefc06ba (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
/**
 * Tests that shard removal triggers an update of the catalog cache so that routers don't continue
 * to target shards that have been removed.
 */
(function() {
'use strict';
// Provides the removeShard() helper used by both test cases below.
load('jstests/sharding/libs/remove_shard_util.js');

// Checking UUID consistency involves talking to shards, but this test shuts down shards.
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
TestData.skipCheckingIndexesConsistentAcrossCluster = true;
TestData.skipCheckShardFilteringMetadata = true;

// TODO SERVER-50144 Remove this and allow orphan checking.
// This test calls removeShard which can leave docs in config.rangeDeletions in state "pending",
// therefore preventing orphans from being cleaned up.
TestData.skipCheckOrphans = true;

// Database name shared by both test cases below.
const dbName = 'TestDB';

/**
 * Test that sharded collections with data on a shard that gets removed are correctly invalidated in
 * a router's catalog cache.
 *
 * 1. Create 2 shards and 2 routers. Make shard1 the primary shard for a database.
 * 2. Put a chunk of a sharded collection's data on shard0.
 * 3. Ensure both routers have up-to-date routing info.
 * 4. Remove shard0 by sending removeShard through router 0. All data will be migrated to shard1.
 * 5. Send a query through router 1 to target the sharded collection. This should correctly target
 *    shard1.
 */
(() => {
    jsTestLog(
        "Test that sharded collections with data on a shard that gets removed are correctly invalidated in a router's catalog cache.");

    const shardedCollName = 'Coll';
    const shardedCollNs = dbName + '.' + shardedCollName;

    const st = new ShardingTest({shards: 2, mongos: 2});
    let router0ShardedColl = st.s0.getDB(dbName)[shardedCollName];
    let router1ShardedColl = st.s1.getDB(dbName)[shardedCollName];

    assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
    st.ensurePrimaryShard(dbName, st.shard1.shardName);
    assert.commandWorked(st.s0.adminCommand({shardCollection: shardedCollNs, key: {_id: 1}}));

    // Move one chunk to shard0 so the to-be-removed shard owns data for the collection.
    assert.commandWorked(st.s0.adminCommand({
        moveChunk: shardedCollNs,
        find: {_id: -1},
        to: st.shard0.shardName,
        _waitForDelete: true
    }));

    // Insert one document per shard. Assert the writes succeed so a silent write failure can't
    // masquerade as a routing problem in the count checks below.
    assert.commandWorked(router0ShardedColl.insert({_id: -1}));
    assert.commandWorked(router0ShardedColl.insert({_id: 1}));

    // Force s0 and s1 to load the database and collection cache entries for the sharded collection.
    assert.eq(2, router0ShardedColl.find({}).itcount());
    assert.eq(2, router1ShardedColl.find({}).itcount());

    // Start the balancer here so that it can drain shard0 when it's removed but also won't conflict
    // with the above moveChunk command.
    st.startBalancer();

    // Remove shard0.
    removeShard(st, st.shard0.shardName);

    // Stop the replica set so that future requests to this shard will be unsuccessful. Skip this
    // step for a config shard, since the config server must be up for the second router to
    // refresh. The default read concern is local, so the router should eventually target a shard
    // with chunks.
    if (!TestData.configShard) {
        st.rs0.stopSet();
    }

    // Ensure that s1, the router which did not run removeShard, eventually stops targeting chunks
    // for the sharded collection which previously resided on a shard that no longer exists.
    assert.soon(() => {
        try {
            router1ShardedColl.count({_id: 1});
            return true;
        } catch (e) {
            print(e);
            return false;
        }
    });

    st.stop();
})();

/**
 * Test that entries for a database whose original primary shard gets removed are correctly
 * invalidated in a router's catalog cache.
 *
 * 1. Create 2 shards and 2 routers. Make shard0 the primary shard for a database.
 * 2. Put data for an unsharded collection on shard0.
 * 3. Ensure both routers have up-to-date routing info.
 * 4. movePrimary for the database to shard1.
 * 5. Remove shard0 by sending removeShard through router 0.
 * 6. Send a query through router 1 to target the unsharded collection. This should correctly
 *    target shard1.
 */
(() => {
    jsTestLog(
        "Test that entries for a database whose original primary shard gets removed are correctly invalidated in a router's catalog cache.");

    const unshardedCollName = 'UnshardedColl';
    const unshardedCollNs = dbName + '.' + unshardedCollName;

    const st = new ShardingTest({shards: 2, mongos: 2, other: {enableBalancer: true}});

    let router0UnshardedColl = st.s0.getDB(dbName)[unshardedCollName];
    let router1UnshardedColl = st.s1.getDB(dbName)[unshardedCollName];

    assert.commandWorked(st.s0.adminCommand({enableSharding: dbName}));
    st.ensurePrimaryShard(dbName, st.shard0.shardName);

    // Insert a document into the unsharded collection whose primary is the to-be-removed shard0.
    // Assert the write succeeds so a silent write failure can't masquerade as a routing problem in
    // the count checks below.
    assert.commandWorked(router0UnshardedColl.insert({_id: 1}));

    // Force s0 and s1 to load the database and collection cache entries for the unsharded
    // collection.
    assert.eq(1, router0UnshardedColl.find({}).itcount());
    assert.eq(1, router1UnshardedColl.find({}).itcount());

    // Call movePrimary for the database so that shard0 can be removed.
    assert.commandWorked(st.s0.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));

    // Remove shard0. The removeShard helper retries internally, since chunks in the sessions
    // collection may still need to be migrated off by the balancer before draining completes.
    removeShard(st, st.shard0.shardName);

    // Stop the replica set so that future requests to this shard will be unsuccessful. Skip this
    // step for a config shard, since the config server must be up for the second router to
    // refresh. The default read concern is local, so the router should eventually target a shard
    // with chunks.
    if (!TestData.configShard) {
        st.rs0.stopSet();
    }

    // Ensure that s1, the router which did not run removeShard, eventually stops targeting data for
    // the unsharded collection which previously had as primary a shard that no longer exists.
    assert.soon(() => {
        try {
            router1UnshardedColl.count({_id: 1});
            return true;
        } catch (e) {
            print(e);
            return false;
        }
    });
    st.stop();
})();
})();