summaryrefslogtreecommitdiff
path: root/jstests/sharding/change_stream_metadata_notifications.js
blob: d3d322a9899c3eaee95ebb6996d45282fae69d8e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
// Tests metadata notifications of change streams on sharded collections.
// @tags: [
//   requires_majority_read_concern,
// ]
(function() {
"use strict";
load("jstests/libs/change_stream_util.js");        // For isChangeStreamsOptimizationEnabled.
load("jstests/libs/collection_drop_recreate.js");  // For assertDropAndRecreateCollection.
load('jstests/replsets/libs/two_phase_drops.js');  // For TwoPhaseDropCollectionTest.

const st = new ShardingTest({
    shards: 2,
    rs: {
        nodes: 1,
        enableMajorityReadConcern: '',
    }
});

const mongosDB = st.s0.getDB(jsTestName());
const mongosColl = mongosDB[jsTestName()];
const isChangeStreamOptimized = isChangeStreamsOptimizationEnabled(mongosDB);

// Start from a clean database so prior runs cannot interfere.
assert.commandWorked(mongosDB.dropDatabase());

// Enable sharding on the test DB and ensure its primary is st.shard0.shardName.
assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());

// Shard the test collection on a field called 'shardKey'.
assert.commandWorked(
    mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));

// Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey].
assert.commandWorked(
    mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {shardKey: 0}}));

// Move the [0, MaxKey] chunk to st.shard1.shardName.
assert.commandWorked(mongosDB.adminCommand(
    {moveChunk: mongosColl.getFullName(), find: {shardKey: 1}, to: st.rs1.getURL()}));

// Write a document to each chunk.
assert.commandWorked(mongosColl.insert({shardKey: -1, _id: -1}, {writeConcern: {w: "majority"}}));
assert.commandWorked(mongosColl.insert({shardKey: 1, _id: 1}, {writeConcern: {w: "majority"}}));

let changeStream = mongosColl.watch();

// We awaited the replication of the first writes, so the change stream shouldn't return them.
assert.commandWorked(mongosColl.update({shardKey: -1, _id: -1}, {$set: {updated: true}}));
assert.commandWorked(mongosColl.update({shardKey: 1, _id: 1}, {$set: {updated: true}}));
assert.commandWorked(mongosColl.insert({shardKey: 2, _id: 2}));

// Drop the collection and test that we return a "drop" entry, followed by an "invalidate"
// entry. DBCollection.drop() returns a boolean, so assert it succeeded.
assert(mongosColl.drop());

// Test that we see the two writes that happened before the collection drop.
assert.soon(() => changeStream.hasNext());
let next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey.shardKey, -1);
// Save this event's resume token so later streams can resume from before the drop.
const resumeTokenFromFirstUpdate = next._id;

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey.shardKey, 1);

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "insert");
assert.eq(next.documentKey._id, 2);

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "drop");
assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "invalidate");

// Store the collection drop invalidate token for subsequent tests.
const collectionDropInvalidateToken = next._id;

// The invalidate closes the cursor; nothing further should be returned.
assert(!changeStream.hasNext());
assert(changeStream.isExhausted());

// If change stream optimization feature flag is enabled, verify that even after filtering out all
// events, the cursor still returns the invalidate resume token of the dropped collection.
if (isChangeStreamOptimized) {
    const resumeStream = mongosColl.watch([{$match: {operationType: "DummyOperationType"}}],
                                          {resumeAfter: resumeTokenFromFirstUpdate});
    assert.soon(() => {
        assert(!resumeStream.hasNext());
        return resumeStream.isExhausted();
    });
    assert.eq(resumeStream.getResumeToken(), collectionDropInvalidateToken);
}

// With an explicit collation, test that we can resume from before the collection drop.
changeStream =
    mongosColl.watch([], {resumeAfter: resumeTokenFromFirstUpdate, collation: {locale: "simple"}});

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "update");
assert.eq(next.documentKey, {shardKey: 1, _id: 1});

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "insert");
assert.eq(next.documentKey, {shardKey: 2, _id: 2});

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "drop");
assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "invalidate");
assert(!changeStream.hasNext());
assert(changeStream.isExhausted());

// Test that we can resume the change stream without specifying an explicit collation.
assert.commandWorked(mongosDB.runCommand({
    aggregate: mongosColl.getName(),
    pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
    cursor: {}
}));

// Recreate and shard the collection.
assert.commandWorked(mongosDB.createCollection(mongosColl.getName()));

// Shard the test collection on shardKey.
assert.commandWorked(
    mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {shardKey: 1}}));

// Test that resuming the change stream on the recreated collection succeeds, since we will not
// attempt to inherit the collection's default collation and can therefore ignore the new UUID.
assert.commandWorked(mongosDB.runCommand({
    aggregate: mongosColl.getName(),
    pipeline: [{$changeStream: {resumeAfter: resumeTokenFromFirstUpdate}}],
    cursor: {}
}));

// Recreate the collection as unsharded and open a change stream on it.
assertDropAndRecreateCollection(mongosDB, mongosColl.getName());

changeStream = mongosColl.watch();

// Drop the database and verify that the stream returns a collection drop followed by an
// invalidate.
assert.commandWorked(mongosDB.dropDatabase());

assert.soon(() => changeStream.hasNext());
next = changeStream.next();

// Store the token to be used as 'resumeAfter' token by other change streams.
const resumeTokenAfterDbDrop = next._id;

assert.eq(next.operationType, "drop");
assert.eq(next.ns, {db: mongosDB.getName(), coll: mongosColl.getName()});

assert.soon(() => changeStream.hasNext());
next = changeStream.next();
assert.eq(next.operationType, "invalidate");
assert(!changeStream.hasNext());
assert(changeStream.isExhausted());

// Store the database drop invalidate token for other change streams.
const dbDropInvalidateToken = next._id;

// If change stream optimization feature flag is enabled, verify that even after filtering out all
// events, the cursor still returns the invalidate resume token of the dropped database.
if (isChangeStreamOptimized) {
    const resumeStream = mongosColl.watch([{$match: {operationType: "DummyOperationType"}}],
                                          {resumeAfter: resumeTokenAfterDbDrop});
    assert.soon(() => {
        assert(!resumeStream.hasNext());
        return resumeStream.isExhausted();
    });
    assert.eq(resumeStream.getResumeToken(), dbDropInvalidateToken);
}

st.stop();
})();