// jstests/sharding/supporting_unique_index_check_is_versioned.js
//
// Tests that the check mongos performs for a unique index supporting $merge's "on" fields is
// versioned, i.e. the listIndexes commands it sends include a shard and/or database version.
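//
// $merge requires the "on" fields to be backed by a unique index on the target collection; for
// example, on: ["a", "b"] (as used below) needs a unique index on {a: 1, b: 1}. Mongos verifies
// this by sending listIndexes to the shard(s) that own the target collection's data.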
(function() {
"use strict";

load("jstests/libs/profiler.js");

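// Turns on full profiling (level 2) on each shard's primary for the given database so the
// listIndexes commands mongos sends during the $merge index check are recorded in system.profile.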
function prepareProfilerOnShards(st, dbName) {
    st._rs.forEach(rs => {
        const shardDB = rs.test.getPrimary().getDB(dbName);
        shardDB.system.profile.drop();
        assert.commandWorked(shardDB.setProfilingLevel(2));
    });
}

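// Asserts that the given shard's profiler recorded a listIndexes against the target collection and
// that the recorded command did or did not carry "shardVersion" and "databaseVersion" fields, per
// the expect* flags.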
function verifyProfilerListIndexesEntry(
    {profileDB, collName, expectShardVersion, expectDbVersion}) {
    profilerHasAtLeastOneMatchingEntryOrThrow({
        profileDB: profileDB,
        filter: {
            "command.listIndexes": collName,
            "command.shardVersion": {$exists: expectShardVersion},
            "command.databaseVersion": {$exists: expectDbVersion}
        }
    });
}

// Creates the source collection and target collection as unsharded collections on shard0.
function setUpUnshardedSourceAndTargetCollections(st, dbName, sourceCollName, targetCollName) {
    assert.commandWorked(st.s.getDB(dbName).createCollection(sourceCollName));
    assert.commandWorked(st.s.getDB(dbName).createCollection(targetCollName));
    st.ensurePrimaryShard(dbName, st.shard0.shardName);

    assert.commandWorked(st.s.getDB(dbName)[sourceCollName].insert({a: 10, b: 11}));
    assert.commandWorked(st.s.getDB(dbName)[targetCollName].insert({a: 10, b: 12}));
}

// Creates the source collection as an unsharded collection on shard0 and the target collection as a
// sharded collection with one chunk on shard0.
function setUpUnshardedSourceShardedTargetCollections(st, dbName, sourceCollName, targetCollName) {
    assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
    st.ensurePrimaryShard(dbName, st.shard0.shardName);

    assert.commandWorked(st.s.getDB(dbName)[sourceCollName].insert({a: 10, b: 11}));

    const targetColl = st.s.getDB(dbName)[targetCollName];
    assert.commandWorked(
        st.s.adminCommand({shardCollection: targetColl.getFullName(), key: {a: 1}}));
    assert.commandWorked(targetColl.insert({a: 10, b: 12}));
}

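// Runs an aggregation with a $merge stage into the target collection through the (possibly stale)
// router st.s, using the given "on" fields, and asserts that it succeeds.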
function expectMergeToSucceed(dbName, sourceCollName, targetCollName, onFields) {
    assert.commandWorked(st.s.getDB(dbName).runCommand({
        aggregate: sourceCollName,
        pipeline: [{
            $merge: {
                into: {db: dbName, coll: targetCollName},
                whenMatched: "replace",
                whenNotMatched: "insert",
                on: Object.keys(onFields)
            }
        }],
        cursor: {}
    }));
}

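// Same as above, but asserts the aggregation fails with code 51190, the error $merge returns when
// it cannot find a unique index on the target collection matching the "on" fields.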
function expectMergeToFailBecauseOfMissingIndex(dbName, sourceCollName, targetCollName, onFields) {
    assert.commandFailedWithCode(st.s.getDB(dbName).runCommand({
        aggregate: sourceCollName,
        pipeline: [{
            $merge: {
                into: {db: dbName, coll: targetCollName},
                whenMatched: "replace",
                whenNotMatched: "insert",
                on: Object.keys(onFields)
            }
        }],
        cursor: {}
    }),
                                 51190);
}

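// Two single-node shards and two routers: metadata changes (movePrimary, moveChunk, drops) are made
// through st.s1 so that st.s, which issues the $merge, is left with stale routing information.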
const st = new ShardingTest({shards: 2, rs: {nodes: 1}, mongos: 2});
const sourceCollName = "sourceFoo";
const targetCollName = "targetFoo";

//
// Verify database versions are used to detect when the primary shard changes for an unsharded
// target collection.
//
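// Note: with an unsharded target, the listIndexes for the index check is expected to carry both a
// shardVersion and a databaseVersion, as asserted via the profiler below.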

(() => {
    const dbName = "testMovedPrimarySuccess";
    jsTestLog("Running test on database: " + dbName);

    setUpUnshardedSourceAndTargetCollections(st, dbName, sourceCollName, targetCollName);

    // Move the primary from shard0 to shard1 and create an index required for the merge only on the
    // new primary. Because of database versioning, the stale router should discover the primary has
    // changed when checking indexes for the merge and load them from the new primary.
    const otherRouter = st.s1;
    assert.commandWorked(otherRouter.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));
    assert.commandWorked(
        otherRouter.getDB(dbName)[targetCollName].createIndex({a: 1, b: 1}, {unique: true}));

    // Run $merge and expect it to succeed because the stale router refreshes and is able to find
    // the correct indexes. Enable the profiler to verify shard/db versions later.
    prepareProfilerOnShards(st, dbName);
    expectMergeToSucceed(dbName, sourceCollName, targetCollName, {a: 1, b: 1});

    // Verify the index check's listIndexes targeted the expected shard and included the expected
    // shard/db versions.
    verifyProfilerListIndexesEntry({
        profileDB: st.rs1.getPrimary().getDB(dbName),
        collName: targetCollName,
        expectShardVersion: true,
        expectDbVersion: true
    });
})();

(() => {
    const dbName = "testMovedPrimaryFailure";
    jsTestLog("Running test on database: " + dbName);

    setUpUnshardedSourceAndTargetCollections(st, dbName, sourceCollName, targetCollName);

    // Create the index necessary for the merge below.
    assert.commandWorked(
        st.s.getDB(dbName)[targetCollName].createIndex({a: 1, b: 1}, {unique: true}));

    // Move the primary from shard0 to shard1 and drop the index required for the merge only on the
    // new primary. Note that the collection will be dropped from the old primary when the
    // movePrimary completes, so this case would pass without versioning, but is included for
    // completeness.
    const otherRouter = st.s1;
    assert.commandWorked(otherRouter.adminCommand({movePrimary: dbName, to: st.shard1.shardName}));

    const targetColl = otherRouter.getDB(dbName)[targetCollName];
    assert.commandWorked(targetColl.dropIndex("a_1_b_1"));

    // Run $merge and expect it to fail because the router refreshes and discovers the required
    // index no longer exists. Enable the profiler to verify shard/db versions later.
    prepareProfilerOnShards(st, dbName);
    expectMergeToFailBecauseOfMissingIndex(dbName, sourceCollName, targetCollName, {a: 1, b: 1});

    // Verify the index check's listIndexes targeted the expected shard and included the expected
    // shard/db versions.
    verifyProfilerListIndexesEntry({
        profileDB: st.rs1.getPrimary().getDB(dbName),
        collName: targetCollName,
        expectShardVersion: true,
        expectDbVersion: true
    });
})();

//
// Verify shard versions are used to detect when the set of shards that own chunks for a sharded
// target collection changes.
//
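// Note: with a sharded target, the listIndexes for the index check is expected to carry a
// shardVersion but no databaseVersion.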

(() => {
    const dbName = "testMovedChunkSuccess";
    jsTestLog("Running test on database: " + dbName);

    setUpUnshardedSourceShardedTargetCollections(st, dbName, sourceCollName, targetCollName);

    // Move the only chunk for the test collection from shard0 to shard1 and create an index
    // required for the merge. Indexes are only created on shards that own chunks, so the index
    // will only exist on shard1.
    const otherRouter = st.s1;
    const targetColl = otherRouter.getDB(dbName)[targetCollName];
    assert.commandWorked(otherRouter.adminCommand(
        {moveChunk: targetColl.getFullName(), find: {a: 0}, to: st.shard1.shardName}));
    assert.commandWorked(targetColl.createIndex({a: 1, b: 1}, {unique: true}));

    // Run $merge and expect it to succeed because the stale router refreshes and is able to find
    // the correct indexes. Enable the profiler to verify shard/db versions later.
    prepareProfilerOnShards(st, dbName);
    expectMergeToSucceed(dbName, sourceCollName, targetCollName, {a: 1, b: 1});

    // Verify the index check's listIndexes targeted the expected shard and included the expected
    // shard/db versions.
    verifyProfilerListIndexesEntry({
        profileDB: st.rs1.getPrimary().getDB(dbName),
        collName: targetCollName,
        expectShardVersion: true,
        expectDbVersion: false
    });
})();

(() => {
    const dbName = "testMovedChunkFailure";
    jsTestLog("Running test on database: " + dbName);

    setUpUnshardedSourceShardedTargetCollections(st, dbName, sourceCollName, targetCollName);

    // Create the index necessary for the merge below.
    assert.commandWorked(
        st.s.getDB(dbName)[targetCollName].createIndex({a: 1, b: 1}, {unique: true}));

    // Move the only chunk for the test collection from shard0 to shard1 and drop the index required
    // for the merge. dropIndexes will only target shards that own chunks, so the index will still
    // exist on shard0.
    const otherRouter = st.s1;
    const targetColl = otherRouter.getDB(dbName)[targetCollName];
    assert.commandWorked(otherRouter.adminCommand(
        {moveChunk: targetColl.getFullName(), find: {a: 0}, to: st.shard1.shardName}));
    assert.commandWorked(targetColl.dropIndex("a_1_b_1"));

    // Run $merge and expect it to fail because the router refreshes and discovers the required
    // index no longer exists. Enable the profiler to verify shard/db versions later.
    prepareProfilerOnShards(st, dbName);
    expectMergeToFailBecauseOfMissingIndex(dbName, sourceCollName, targetCollName, {a: 1, b: 1});

    // Verify the index check's listIndexes targeted the expected shard and included the expected
    // shard/db versions.
    verifyProfilerListIndexesEntry({
        profileDB: st.rs1.getPrimary().getDB(dbName),
        collName: targetCollName,
        expectShardVersion: true,
        expectDbVersion: false
    });
})();

//
// Verify shard versions are used to detect when an unsharded collection becomes sharded or vice
// versa.
//
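// Note: the expected versions depend on the target's state at the time of the $merge: a
// shardVersion only if it is sharded, or a shardVersion plus a databaseVersion if it is unsharded.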

(() => {
    const dbName = "testBecomeShardedSuccess";
    jsTestLog("Running test on database: " + dbName);

    setUpUnshardedSourceAndTargetCollections(st, dbName, sourceCollName, targetCollName);

    // Shard the target collection through a different router, move its only chunk to shard1, and
    // create a new index required for the merge on only the shard with the chunk.
    const otherRouter = st.s1;
    assert.commandWorked(otherRouter.adminCommand({enableSharding: dbName}));

    const targetColl = otherRouter.getDB(dbName)[targetCollName];
    assert.commandWorked(targetColl.createIndex({a: 1}));
    assert.commandWorked(
        st.s1.adminCommand({shardCollection: targetColl.getFullName(), key: {a: 1}}));

    assert.commandWorked(otherRouter.adminCommand(
        {moveChunk: targetColl.getFullName(), find: {a: 0}, to: st.shard1.shardName}));
    assert.commandWorked(targetColl.createIndex({a: 1, b: 1}, {unique: true}));

    // Run $merge and expect it to succeed because the stale router refreshes and is able to find
    // the correct indexes. Enable the profiler to verify shard/db versions later.
    prepareProfilerOnShards(st, dbName);
    expectMergeToSucceed(dbName, sourceCollName, targetCollName, {a: 1, b: 1});

    // Verify the index check's listIndexes targeted the expected shard and included the expected
    // shard/db versions.
    verifyProfilerListIndexesEntry({
        profileDB: st.rs1.getPrimary().getDB(dbName),
        collName: targetCollName,
        expectShardVersion: true,
        expectDbVersion: false
    });
})();

(() => {
    const dbName = "testBecomeUnshardedFailure";
    jsTestLog("Running test on database: " + dbName);

    setUpUnshardedSourceShardedTargetCollections(st, dbName, sourceCollName, targetCollName);

    // Create the index necessary for the merge below.
    assert.commandWorked(
        st.s.getDB(dbName)[targetCollName].createIndex({a: 1, b: 1}, {unique: true}));

    // Drop and recreate the sharded target collection as an unsharded collection with its primary
    // on shard1. Dropping the collection will also drop the index required for the merge.
    const otherRouter = st.s1;
    const targetColl = otherRouter.getDB(dbName)[targetCollName];
    assert(targetColl.drop());

    st.ensurePrimaryShard(dbName, st.shard1.shardName);
    assert.commandWorked(targetColl.insert({a: 10, b: 12}));

    // Run $merge and expect it to fail because the router refreshes and discovers the required
    // index no longer exists. Enable the profiler to verify shard/db versions later.
    prepareProfilerOnShards(st, dbName);
    expectMergeToFailBecauseOfMissingIndex(dbName, sourceCollName, targetCollName, {a: 1, b: 1});

    // Verify the index check's listIndexes targeted the expected shard and included the expected
    // shard/db versions.
    verifyProfilerListIndexesEntry({
        profileDB: st.rs1.getPrimary().getDB(dbName),
        collName: targetCollName,
        expectShardVersion: true,
        expectDbVersion: true
    });
})();

st.stop();
})();