author     clang-format-7.0.1 <adam.martin@10gen.com>  2019-07-26 18:20:35 -0400
committer  ADAM David Alan Martin <adam.martin@10gen.com>  2019-07-27 11:02:23 -0400
commit     134a4083953270e8a11430395357fb70a29047ad (patch)
tree       dd428e1230e31d92b20b393dfdc17ffe7fa79cb6 /jstests/change_streams
parent     1e46b5049003f427047e723ea5fab15b5a9253ca (diff)
SERVER-41772 Apply clang-format 7.0.1 to the codebase
Diffstat (limited to 'jstests/change_streams')
-rw-r--r--  jstests/change_streams/apply_ops.js  300
-rw-r--r--  jstests/change_streams/apply_ops_resumability.js  356
-rw-r--r--  jstests/change_streams/ban_from_lookup.js  24
-rw-r--r--  jstests/change_streams/ban_from_views.js  48
-rw-r--r--  jstests/change_streams/change_stream.js  513
-rw-r--r--  jstests/change_streams/collation.js  662
-rw-r--r--  jstests/change_streams/does_not_implicitly_create_database.js  120
-rw-r--r--  jstests/change_streams/error_label.js  40
-rw-r--r--  jstests/change_streams/include_cluster_time.js  80
-rw-r--r--  jstests/change_streams/lookup_post_image.js  477
-rw-r--r--  jstests/change_streams/metadata_notifications.js  491
-rw-r--r--  jstests/change_streams/no_regex_leak.js  94
-rw-r--r--  jstests/change_streams/only_wake_getmore_for_relevant_changes.js  309
-rw-r--r--  jstests/change_streams/pipeline_cannot_modify_id_field.js  260
-rw-r--r--  jstests/change_streams/report_post_batch_resume_token.js  364
-rw-r--r--  jstests/change_streams/required_as_first_stage.js  90
-rw-r--r--  jstests/change_streams/resume_from_high_water_mark_token.js  465
-rw-r--r--  jstests/change_streams/shell_helper.js  396
-rw-r--r--  jstests/change_streams/start_at_cluster_time.js  123
-rw-r--r--  jstests/change_streams/whitelist.js  64
-rw-r--r--  jstests/change_streams/whole_cluster.js  230
-rw-r--r--  jstests/change_streams/whole_cluster_metadata_notifications.js  466
-rw-r--r--  jstests/change_streams/whole_cluster_resumability.js  290
-rw-r--r--  jstests/change_streams/whole_db.js  138
-rw-r--r--  jstests/change_streams/whole_db_metadata_notifications.js  468
-rw-r--r--  jstests/change_streams/whole_db_resumability.js  362
26 files changed, 3585 insertions, 3645 deletions
diff --git a/jstests/change_streams/apply_ops.js b/jstests/change_streams/apply_ops.js
index 80065805c78..fa232f77f1b 100644
--- a/jstests/change_streams/apply_ops.js
+++ b/jstests/change_streams/apply_ops.js
@@ -2,160 +2,162 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- const otherCollName = "change_stream_apply_ops_2";
- const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
- assertDropAndRecreateCollection(db, otherCollName);
-
- const otherDbName = "change_stream_apply_ops_db";
- const otherDbCollName = "someColl";
- assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
-
- // Insert a document that gets deleted as part of the transaction.
- const kDeletedDocumentId = 0;
- coll.insert({_id: kDeletedDocumentId, a: "I was here before the transaction"},
- {writeConcern: {w: "majority"}});
-
- let cst = new ChangeStreamTest(db);
- let changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}],
- collection: coll,
- doNotModifyInPassthroughs:
- true // A collection drop only invalidates single-collection change streams.
- });
-
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(db.getName());
- const sessionColl = sessionDb[coll.getName()];
-
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
- assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
-
- // One insert on a collection that we're not watching. This should be skipped by the
- // single-collection changestream.
- assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
-
- // One insert on a collection in a different database. This should be skipped by the single
- // collection and single-db changestreams.
- assert.commandWorked(
- session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
-
- assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
-
- assert.commandWorked(sessionColl.deleteOne({_id: kDeletedDocumentId}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Do applyOps on the collection that we care about. This is an "external" applyOps, though
- // (not run as part of a transaction) so its entries should be skipped in the change
- // stream. This checks that applyOps that don't have an 'lsid' and 'txnNumber' field do not
- // get unwound.
- assert.commandWorked(db.runCommand({
- applyOps: [
- {op: "i", ns: coll.getFullName(), o: {_id: 3, a: "SHOULD NOT READ THIS"}},
- ]
- }));
-
- // Drop the collection. This will trigger an "invalidate" event at the end of the stream.
- assert.commandWorked(db.runCommand({drop: coll.getName()}));
-
- // Define the set of changes expected for the single-collection case per the operations above.
- const expectedChanges = [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 1},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {a: 1}},
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: kDeletedDocumentId},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "delete",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- operationType: "drop",
- ns: {db: db.getName(), coll: coll.getName()},
- },
- ];
-
- // Verify that the stream returns the expected sequence of changes.
- const changes =
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
- // Single collection change stream should also be invalidated by the drop.
- cst.assertNextChangesEqual({
- cursor: changeStream,
- expectedChanges: [{operationType: "invalidate"}],
- expectInvalidate: true
- });
-
- // Obtain the clusterTime from the first change.
- const startTime = changes[0].clusterTime;
-
- // Add an entry for the insert on db.otherColl into expectedChanges.
- expectedChanges.splice(2, 0, {
- documentKey: {_id: 111},
- fullDocument: {_id: 111, a: "Doc on other collection"},
- ns: {db: db.getName(), coll: otherCollName},
+"use strict";
+
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+const otherCollName = "change_stream_apply_ops_2";
+const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
+assertDropAndRecreateCollection(db, otherCollName);
+
+const otherDbName = "change_stream_apply_ops_db";
+const otherDbCollName = "someColl";
+assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
+
+// Insert a document that gets deleted as part of the transaction.
+const kDeletedDocumentId = 0;
+coll.insert({_id: kDeletedDocumentId, a: "I was here before the transaction"},
+ {writeConcern: {w: "majority"}});
+
+let cst = new ChangeStreamTest(db);
+let changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}],
+ collection: coll,
+ doNotModifyInPassthroughs:
+ true // A collection drop only invalidates single-collection change streams.
+});
+
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(db.getName());
+const sessionColl = sessionDb[coll.getName()];
+
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
+assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
+
+// One insert on a collection that we're not watching. This should be skipped by the
+// single-collection changestream.
+assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
+
+// One insert on a collection in a different database. This should be skipped by the single
+// collection and single-db changestreams.
+assert.commandWorked(
+ session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
+
+assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
+
+assert.commandWorked(sessionColl.deleteOne({_id: kDeletedDocumentId}));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Do applyOps on the collection that we care about. This is an "external" applyOps, though
+// (not run as part of a transaction) so its entries should be skipped in the change
+// stream. This checks that applyOps that don't have an 'lsid' and 'txnNumber' field do not
+// get unwound.
+assert.commandWorked(db.runCommand({
+ applyOps: [
+ {op: "i", ns: coll.getFullName(), o: {_id: 3, a: "SHOULD NOT READ THIS"}},
+ ]
+}));
+
+// Drop the collection. This will trigger an "invalidate" event at the end of the stream.
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
+
+// Define the set of changes expected for the single-collection case per the operations above.
+const expectedChanges = [
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-db stream returns the expected sequence of changes, including the insert
- // on the other collection but NOT the changes on the other DB or the manual applyOps.
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {startAtOperationTime: startTime}}, {$project: {"lsid.uid": 0}}],
- collection: 1
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
-
- // Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
- expectedChanges.splice(3, 0, {
- documentKey: {_id: 222},
- fullDocument: {_id: 222, a: "Doc on other DB"},
- ns: {db: otherDbName, coll: otherDbCollName},
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-cluster stream returns the expected sequence of changes, including the
- // inserts on the other collection and the other database, but NOT the manual applyOps.
- cst = new ChangeStreamTest(db.getSiblingDB("admin"));
- changeStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {startAtOperationTime: startTime, allChangesForCluster: true}},
- {$project: {"lsid.uid": 0}}
- ],
- collection: 1
- });
+ },
+ {
+ documentKey: {_id: 1},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {a: 1}},
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ },
+ {
+ documentKey: {_id: kDeletedDocumentId},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "delete",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ },
+ {
+ operationType: "drop",
+ ns: {db: db.getName(), coll: coll.getName()},
+ },
+];
+
+// Verify that the stream returns the expected sequence of changes.
+const changes =
cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
-
- cst.cleanUp();
+// Single collection change stream should also be invalidated by the drop.
+cst.assertNextChangesEqual({
+ cursor: changeStream,
+ expectedChanges: [{operationType: "invalidate"}],
+ expectInvalidate: true
+});
+
+// Obtain the clusterTime from the first change.
+const startTime = changes[0].clusterTime;
+
+// Add an entry for the insert on db.otherColl into expectedChanges.
+expectedChanges.splice(2, 0, {
+ documentKey: {_id: 111},
+ fullDocument: {_id: 111, a: "Doc on other collection"},
+ ns: {db: db.getName(), coll: otherCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-db stream returns the expected sequence of changes, including the insert
+// on the other collection but NOT the changes on the other DB or the manual applyOps.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {startAtOperationTime: startTime}}, {$project: {"lsid.uid": 0}}],
+ collection: 1
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
+
+// Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
+expectedChanges.splice(3, 0, {
+ documentKey: {_id: 222},
+ fullDocument: {_id: 222, a: "Doc on other DB"},
+ ns: {db: otherDbName, coll: otherDbCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-cluster stream returns the expected sequence of changes, including the
+// inserts on the other collection and the other database, but NOT the manual applyOps.
+cst = new ChangeStreamTest(db.getSiblingDB("admin"));
+changeStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {startAtOperationTime: startTime, allChangesForCluster: true}},
+ {$project: {"lsid.uid": 0}}
+ ],
+ collection: 1
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
+
+cst.cleanUp();
}());
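
The test above exercises transaction unwinding: writes committed inside a multi-document transaction are unwound from their applyOps oplog entry into individual change events carrying 'lsid' and 'txnNumber', while a manually issued applyOps command (which has neither field) produces no events. A minimal shell sketch of that contract, using a hypothetical collection name, might look like:

// Minimal sketch (hypothetical 'txnDemo' collection): transactional writes
// surface as ordinary change events tagged with the session's lsid/txnNumber.
const session = db.getMongo().startSession({causalConsistency: false});
const sessionColl = session.getDatabase("test").txnDemo;
const stream = db.txnDemo.watch();

session.startTransaction({writeConcern: {w: "majority"}});
assert.commandWorked(sessionColl.insert({_id: 1}));
assert.commandWorked(session.commitTransaction_forTesting());

assert.soon(() => stream.hasNext());
const event = stream.next();
assert.eq(event.operationType, "insert");
assert(event.hasOwnProperty("lsid"));       // set only for transactional writes
assert(event.hasOwnProperty("txnNumber"));  // a bare applyOps would emit nothing
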
diff --git a/jstests/change_streams/apply_ops_resumability.js b/jstests/change_streams/apply_ops_resumability.js
index 7c61ed15792..bf581d40ee1 100644
--- a/jstests/change_streams/apply_ops_resumability.js
+++ b/jstests/change_streams/apply_ops_resumability.js
@@ -2,188 +2,188 @@
// @tags: [uses_transactions]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
- const otherCollName = "change_stream_apply_ops_2";
- assertDropAndRecreateCollection(db, otherCollName);
-
- const otherDbName = "change_stream_apply_ops_db";
- const otherDbCollName = "someColl";
- assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
-
- let cst = new ChangeStreamTest(db);
- let changeStream = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}], collection: coll});
-
- // Do an insert outside of a transaction.
- assert.commandWorked(coll.insert({_id: 0, a: 123}));
-
- // Open a session, and perform two writes within a transaction.
- const sessionOptions = {causalConsistency: false};
- const session = db.getMongo().startSession(sessionOptions);
- const sessionDb = session.getDatabase(db.getName());
- const sessionColl = sessionDb[coll.getName()];
-
- session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
- assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
- assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
-
- // One insert on a collection that we're not watching. This should be skipped by the
- // single-collection change stream.
- assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
-
- // One insert on a collection in a different database. This should be skipped by the single
- // collection and single-db changestreams.
- assert.commandWorked(
- session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
-
- assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
-
- assert.commandWorked(session.commitTransaction_forTesting());
-
- // Now insert another document, not part of a transaction.
- assert.commandWorked(coll.insert({_id: 3, a: 123}));
-
- // Define the set of changes expected for the single-collection case per the operations above.
- const expectedChanges = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 123},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- },
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, a: 0},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 1},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {a: 1}},
- lsid: session.getSessionId(),
- txnNumber: session.getTxnNumber_forTesting(),
- },
- {
- documentKey: {_id: 3},
- fullDocument: {_id: 3, a: 123},
- ns: {db: db.getName(), coll: coll.getName()},
- operationType: "insert",
- },
- ];
-
- //
- // Test behavior of single-collection change streams with apply ops.
- //
-
- // Verify that the stream returns the expected sequence of changes.
- const changes =
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
-
- // Record the first (non-transaction) change and the first in-transaction change.
- const nonTxnChange = changes[0], firstTxnChange = changes[1], secondTxnChange = changes[2];
-
- // Resume after the first non-transaction change. Be sure we see the documents from the
- // transaction again.
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: nonTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: coll
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(1)});
-
- // Resume after the first transaction change. Be sure we see the second change again.
- changeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: coll
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(2)});
-
- // Try starting another change stream from the _last_ change caused by the transaction. Verify
- // that we can see the insert performed after the transaction was committed.
- let otherCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: coll,
- doNotModifyInPassthroughs: true // A collection drop only invalidates single-collection
- // change streams.
- });
- cst.assertNextChangesEqual({cursor: otherCursor, expectedChanges: expectedChanges.slice(3)});
-
- // Drop the collection. This will trigger a "drop" followed by an "invalidate" for the single
- // collection change stream.
- assert.commandWorked(db.runCommand({drop: coll.getName()}));
- let change = cst.getOneChange(otherCursor);
- assert.eq(change.operationType, "drop");
- assert.eq(change.ns, {db: db.getName(), coll: coll.getName()});
- change = cst.getOneChange(otherCursor, true);
- assert.eq(change.operationType, "invalidate");
-
- //
- // Test behavior of whole-db change streams with apply ops.
- //
-
- // For a whole-db or whole-cluster change stream, the collection drop should return a single
- // "drop" entry and not invalidate the stream.
- expectedChanges.push({operationType: "drop", ns: {db: db.getName(), coll: coll.getName()}});
-
- // Add an entry for the insert on db.otherColl into expectedChanges.
- expectedChanges.splice(3, 0, {
- documentKey: {_id: 111},
- fullDocument: {_id: 111, a: "Doc on other collection"},
- ns: {db: db.getName(), coll: otherCollName},
+"use strict";
+
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+const coll = assertDropAndRecreateCollection(db, "change_stream_apply_ops");
+const otherCollName = "change_stream_apply_ops_2";
+assertDropAndRecreateCollection(db, otherCollName);
+
+const otherDbName = "change_stream_apply_ops_db";
+const otherDbCollName = "someColl";
+assertDropAndRecreateCollection(db.getSiblingDB(otherDbName), otherDbCollName);
+
+let cst = new ChangeStreamTest(db);
+let changeStream = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}, {$project: {"lsid.uid": 0}}], collection: coll});
+
+// Do an insert outside of a transaction.
+assert.commandWorked(coll.insert({_id: 0, a: 123}));
+
+// Open a session, and perform two writes within a transaction.
+const sessionOptions = {
+ causalConsistency: false
+};
+const session = db.getMongo().startSession(sessionOptions);
+const sessionDb = session.getDatabase(db.getName());
+const sessionColl = sessionDb[coll.getName()];
+
+session.startTransaction({readConcern: {level: "snapshot"}, writeConcern: {w: "majority"}});
+assert.commandWorked(sessionColl.insert({_id: 1, a: 0}));
+assert.commandWorked(sessionColl.insert({_id: 2, a: 0}));
+
+// One insert on a collection that we're not watching. This should be skipped by the
+// single-collection change stream.
+assert.commandWorked(sessionDb[otherCollName].insert({_id: 111, a: "Doc on other collection"}));
+
+// One insert on a collection in a different database. This should be skipped by the single
+// collection and single-db changestreams.
+assert.commandWorked(
+ session.getDatabase(otherDbName)[otherDbCollName].insert({_id: 222, a: "Doc on other DB"}));
+
+assert.commandWorked(sessionColl.updateOne({_id: 1}, {$inc: {a: 1}}));
+
+assert.commandWorked(session.commitTransaction_forTesting());
+
+// Now insert another document, not part of a transaction.
+assert.commandWorked(coll.insert({_id: 3, a: 123}));
+
+// Define the set of changes expected for the single-collection case per the operations above.
+const expectedChanges = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 123},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ },
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-db stream can be resumed from the middle of the transaction, and that it
- // will see all subsequent changes including the insert on the other collection but NOT the
- // changes on the other DB.
- changeStream = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
- collection: 1,
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
-
- // Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
- expectedChanges.splice(4, 0, {
- documentKey: {_id: 222},
- fullDocument: {_id: 222, a: "Doc on other DB"},
- ns: {db: otherDbName, coll: otherDbCollName},
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, a: 0},
+ ns: {db: db.getName(), coll: coll.getName()},
operationType: "insert",
lsid: session.getSessionId(),
txnNumber: session.getTxnNumber_forTesting(),
- });
-
- // Verify that a whole-cluster stream can be resumed from the middle of the transaction, and
- // that it will see all subsequent changes including the insert on the other collection and the
- // changes on the other DB.
- cst = new ChangeStreamTest(db.getSiblingDB("admin"));
- changeStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {resumeAfter: secondTxnChange._id, allChangesForCluster: true}},
- {$project: {"lsid.uid": 0}}
- ],
- collection: 1
- });
- cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
-
- cst.cleanUp();
+ },
+ {
+ documentKey: {_id: 1},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {a: 1}},
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+ },
+ {
+ documentKey: {_id: 3},
+ fullDocument: {_id: 3, a: 123},
+ ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "insert",
+ },
+];
+
+//
+// Test behavior of single-collection change streams with apply ops.
+//
+
+// Verify that the stream returns the expected sequence of changes.
+const changes =
+ cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges});
+
+// Record the first (non-transaction) change and the first in-transaction change.
+const nonTxnChange = changes[0], firstTxnChange = changes[1], secondTxnChange = changes[2];
+
+// Resume after the first non-transaction change. Be sure we see the documents from the
+// transaction again.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: nonTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: coll
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(1)});
+
+// Resume after the first transaction change. Be sure we see the second change again.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: coll
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(2)});
+
+// Try starting another change stream from the _last_ change caused by the transaction. Verify
+// that we can see the insert performed after the transaction was committed.
+let otherCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: coll,
+ doNotModifyInPassthroughs: true // A collection drop only invalidates single-collection
+ // change streams.
+});
+cst.assertNextChangesEqual({cursor: otherCursor, expectedChanges: expectedChanges.slice(3)});
+
+// Drop the collection. This will trigger a "drop" followed by an "invalidate" for the single
+// collection change stream.
+assert.commandWorked(db.runCommand({drop: coll.getName()}));
+let change = cst.getOneChange(otherCursor);
+assert.eq(change.operationType, "drop");
+assert.eq(change.ns, {db: db.getName(), coll: coll.getName()});
+change = cst.getOneChange(otherCursor, true);
+assert.eq(change.operationType, "invalidate");
+
+//
+// Test behavior of whole-db change streams with apply ops.
+//
+
+// For a whole-db or whole-cluster change stream, the collection drop should return a single
+// "drop" entry and not invalidate the stream.
+expectedChanges.push({operationType: "drop", ns: {db: db.getName(), coll: coll.getName()}});
+
+// Add an entry for the insert on db.otherColl into expectedChanges.
+expectedChanges.splice(3, 0, {
+ documentKey: {_id: 111},
+ fullDocument: {_id: 111, a: "Doc on other collection"},
+ ns: {db: db.getName(), coll: otherCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-db stream can be resumed from the middle of the transaction, and that it
+// will see all subsequent changes including the insert on the other collection but NOT the
+// changes on the other DB.
+changeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondTxnChange._id}}, {$project: {"lsid.uid": 0}}],
+ collection: 1,
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
+
+// Add an entry for the insert on otherDb.otherDbColl into expectedChanges.
+expectedChanges.splice(4, 0, {
+ documentKey: {_id: 222},
+ fullDocument: {_id: 222, a: "Doc on other DB"},
+ ns: {db: otherDbName, coll: otherDbCollName},
+ operationType: "insert",
+ lsid: session.getSessionId(),
+ txnNumber: session.getTxnNumber_forTesting(),
+});
+
+// Verify that a whole-cluster stream can be resumed from the middle of the transaction, and
+// that it will see all subsequent changes including the insert on the other collection and the
+// changes on the other DB.
+cst = new ChangeStreamTest(db.getSiblingDB("admin"));
+changeStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {resumeAfter: secondTxnChange._id, allChangesForCluster: true}},
+ {$project: {"lsid.uid": 0}}
+ ],
+ collection: 1
+});
+cst.assertNextChangesEqual({cursor: changeStream, expectedChanges: expectedChanges.slice(3)});
+
+cst.cleanUp();
}());
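
apply_ops_resumability.js builds on the same setup by resuming from tokens recorded in the middle of a transaction. The underlying contract is simply that each change event's _id is a resume token; a hedged sketch with an illustrative collection name:

// Minimal resume sketch (hypothetical 'resumeDemo' collection): the _id of any
// change event can be passed back via 'resumeAfter' to replay later events.
const coll = db.resumeDemo;
let stream = coll.watch();
assert.commandWorked(coll.insert({_id: "a"}));
assert.commandWorked(coll.insert({_id: "b"}));

assert.soon(() => stream.hasNext());
const resumeToken = stream.next()._id;  // token for the {_id: "a"} insert

// Reopen the stream after the first event; only the second insert is replayed.
stream = coll.watch([], {resumeAfter: resumeToken});
assert.soon(() => stream.hasNext());
assert.eq(stream.next().documentKey._id, "b");
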
diff --git a/jstests/change_streams/ban_from_lookup.js b/jstests/change_streams/ban_from_lookup.js
index b799c3ce169..45d3c692eea 100644
--- a/jstests/change_streams/ban_from_lookup.js
+++ b/jstests/change_streams/ban_from_lookup.js
@@ -2,23 +2,23 @@
* Test that the $changeStream stage cannot be used in a $lookup pipeline or sub-pipeline.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_lookup");
- const foreignColl = "unsharded";
+const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_lookup");
+const foreignColl = "unsharded";
- assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.insert({_id: 1}));
- // Verify that we cannot create a $lookup using a pipeline which begins with $changeStream.
- assertErrorCode(
- coll, [{$lookup: {from: foreignColl, as: 'as', pipeline: [{$changeStream: {}}]}}], 51047);
+// Verify that we cannot create a $lookup using a pipeline which begins with $changeStream.
+assertErrorCode(
+ coll, [{$lookup: {from: foreignColl, as: 'as', pipeline: [{$changeStream: {}}]}}], 51047);
- // Verify that we cannot create a $lookup if its pipeline contains a sub-$lookup whose pipeline
- // begins with $changeStream.
- assertErrorCode(
+// Verify that we cannot create a $lookup if its pipeline contains a sub-$lookup whose pipeline
+// begins with $changeStream.
+assertErrorCode(
coll,
[{
$lookup: {
diff --git a/jstests/change_streams/ban_from_views.js b/jstests/change_streams/ban_from_views.js
index c06932e55b3..29f78710544 100644
--- a/jstests/change_streams/ban_from_views.js
+++ b/jstests/change_streams/ban_from_views.js
@@ -2,37 +2,37 @@
* Test that the $changeStream stage cannot be used in a view definition pipeline.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_views");
- assert.writeOK(coll.insert({_id: 1}));
+const coll = assertDropAndRecreateCollection(db, "change_stream_ban_from_views");
+assert.writeOK(coll.insert({_id: 1}));
- const normalViewName = "nonChangeStreamView";
- const csViewName = "changeStreamView";
+const normalViewName = "nonChangeStreamView";
+const csViewName = "changeStreamView";
- assertDropCollection(db, normalViewName);
- assertDropCollection(db, csViewName);
+assertDropCollection(db, normalViewName);
+assertDropCollection(db, csViewName);
- const csPipe = [{$changeStream: {}}];
+const csPipe = [{$changeStream: {}}];
- // Create one valid view for testing purposes.
- assert.commandWorked(db.runCommand(
- {create: normalViewName, viewOn: coll.getName(), pipeline: [{$match: {_id: 1}}]}));
+// Create one valid view for testing purposes.
+assert.commandWorked(db.runCommand(
+ {create: normalViewName, viewOn: coll.getName(), pipeline: [{$match: {_id: 1}}]}));
- // Verify that we cannot create a view using a pipeline which begins with $changeStream.
- assert.commandFailedWithCode(
- db.runCommand({create: csViewName, viewOn: coll.getName(), pipeline: csPipe}),
- ErrorCodes.OptionNotSupportedOnView);
+// Verify that we cannot create a view using a pipeline which begins with $changeStream.
+assert.commandFailedWithCode(
+ db.runCommand({create: csViewName, viewOn: coll.getName(), pipeline: csPipe}),
+ ErrorCodes.OptionNotSupportedOnView);
- // We also cannot update an existing view to use a $changeStream pipeline.
- assert.commandFailedWithCode(
- db.runCommand({collMod: normalViewName, viewOn: coll.getName(), pipeline: csPipe}),
- ErrorCodes.OptionNotSupportedOnView);
+// We also cannot update an existing view to use a $changeStream pipeline.
+assert.commandFailedWithCode(
+ db.runCommand({collMod: normalViewName, viewOn: coll.getName(), pipeline: csPipe}),
+ ErrorCodes.OptionNotSupportedOnView);
- // Verify change streams cannot be created on views.
- assert.commandFailedWithCode(
- db.runCommand({aggregate: normalViewName, pipeline: [{$changeStream: {}}], cursor: {}}),
- ErrorCodes.CommandNotSupportedOnView);
+// Verify change streams cannot be created on views.
+assert.commandFailedWithCode(
+ db.runCommand({aggregate: normalViewName, pipeline: [{$changeStream: {}}], cursor: {}}),
+ ErrorCodes.CommandNotSupportedOnView);
})();
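
ban_from_lookup.js and ban_from_views.js both check server-side pipeline validation: $changeStream must be the first stage of a top-level aggregation and nowhere else. The failures they assert can be reproduced directly; the collection and view names below are illustrative:

// Minimal sketch: $changeStream is rejected in $lookup sub-pipelines (code
// 51047) and in view definitions (OptionNotSupportedOnView).
assert.commandFailedWithCode(
    db.runCommand({
        aggregate: "demo",
        pipeline: [{$lookup: {from: "other", as: "out", pipeline: [{$changeStream: {}}]}}],
        cursor: {}
    }),
    51047);

assert.commandFailedWithCode(
    db.runCommand({create: "demoView", viewOn: "demo", pipeline: [{$changeStream: {}}]}),
    ErrorCodes.OptionNotSupportedOnView);
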
diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js
index 396504f2439..6c03864cedd 100644
--- a/jstests/change_streams/change_stream.js
+++ b/jstests/change_streams/change_stream.js
@@ -3,268 +3,265 @@
// collection results in a failure in the secondary reads suite.
// @tags: [assumes_read_preference_unchanged]
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
- // assert[Valid|Invalid]ChangeStreamNss.
-
- const isMongos = FixtureHelpers.isMongos(db);
-
- // Drop and recreate the collections to be used in this set of tests.
- assertDropAndRecreateCollection(db, "t1");
- assertDropAndRecreateCollection(db, "t2");
-
- // Test that $changeStream only accepts an object as its argument.
- function checkArgFails(arg) {
- assert.commandFailedWithCode(
- db.runCommand({aggregate: "t1", pipeline: [{$changeStream: arg}], cursor: {}}), 50808);
- }
-
- checkArgFails(1);
- checkArgFails("invalid");
- checkArgFails(false);
- checkArgFails([1, 2, "invalid", {x: 1}]);
-
- // Test that a change stream cannot be opened on collections in the "admin", "config", or
- // "local" databases.
- assertInvalidChangeStreamNss("admin", "testColl");
- assertInvalidChangeStreamNss("config", "testColl");
- // Not allowed to access 'local' database through mongos.
- if (!isMongos) {
- assertInvalidChangeStreamNss("local", "testColl");
- }
-
- // Test that a change stream cannot be opened on 'system.' collections.
- assertInvalidChangeStreamNss(db.getName(), "system.users");
- assertInvalidChangeStreamNss(db.getName(), "system.profile");
- assertInvalidChangeStreamNss(db.getName(), "system.version");
-
- // Test that a change stream can be opened on namespaces with 'system' in the name, but not
- // considered an internal 'system dot' namespace.
- assertValidChangeStreamNss(db.getName(), "systemindexes");
- assertValidChangeStreamNss(db.getName(), "system_users");
-
- // Similar test but for DB names that are not considered internal.
- assert.writeOK(db.getSiblingDB("admincustomDB")["test"].insert({}));
- assertValidChangeStreamNss("admincustomDB");
-
- assert.writeOK(db.getSiblingDB("local_")["test"].insert({}));
- assertValidChangeStreamNss("local_");
-
- assert.writeOK(db.getSiblingDB("_config_")["test"].insert({}));
- assertValidChangeStreamNss("_config_");
-
- let cst = new ChangeStreamTest(db);
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
-
- jsTestLog("Testing single insert");
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
-
- assert.writeOK(db.t1.insert({_id: 0, a: 1}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: "test", coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- // Test that if there are no changes during a subsequent 'getMore', we return an empty batch.
- cursor = cst.getNextBatch(cursor);
- assert.eq(0, cursor.nextBatch.length, "Cursor had changes: " + tojson(cursor));
-
- jsTestLog("Testing second insert");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.insert({_id: 1, a: 2}));
- expected = {
- documentKey: {_id: 1},
- fullDocument: {_id: 1, a: 2},
- ns: {db: "test", coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing update");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 0}, {_id: 0, a: 3}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 3},
- ns: {db: "test", coll: "t1"},
- operationType: "replace",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing update of another field");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 0}, {_id: 0, b: 3}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, b: 3},
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+
+const isMongos = FixtureHelpers.isMongos(db);
+
+// Drop and recreate the collections to be used in this set of tests.
+assertDropAndRecreateCollection(db, "t1");
+assertDropAndRecreateCollection(db, "t2");
+
+// Test that $changeStream only accepts an object as its argument.
+function checkArgFails(arg) {
+ assert.commandFailedWithCode(
+ db.runCommand({aggregate: "t1", pipeline: [{$changeStream: arg}], cursor: {}}), 50808);
+}
+
+checkArgFails(1);
+checkArgFails("invalid");
+checkArgFails(false);
+checkArgFails([1, 2, "invalid", {x: 1}]);
+
+// Test that a change stream cannot be opened on collections in the "admin", "config", or
+// "local" databases.
+assertInvalidChangeStreamNss("admin", "testColl");
+assertInvalidChangeStreamNss("config", "testColl");
+// Not allowed to access 'local' database through mongos.
+if (!isMongos) {
+ assertInvalidChangeStreamNss("local", "testColl");
+}
+
+// Test that a change stream cannot be opened on 'system.' collections.
+assertInvalidChangeStreamNss(db.getName(), "system.users");
+assertInvalidChangeStreamNss(db.getName(), "system.profile");
+assertInvalidChangeStreamNss(db.getName(), "system.version");
+
+// Test that a change stream can be opened on namespaces with 'system' in the name, but not
+// considered an internal 'system dot' namespace.
+assertValidChangeStreamNss(db.getName(), "systemindexes");
+assertValidChangeStreamNss(db.getName(), "system_users");
+
+// Similar test but for DB names that are not considered internal.
+assert.writeOK(db.getSiblingDB("admincustomDB")["test"].insert({}));
+assertValidChangeStreamNss("admincustomDB");
+
+assert.writeOK(db.getSiblingDB("local_")["test"].insert({}));
+assertValidChangeStreamNss("local_");
+
+assert.writeOK(db.getSiblingDB("_config_")["test"].insert({}));
+assertValidChangeStreamNss("_config_");
+
+let cst = new ChangeStreamTest(db);
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+
+jsTestLog("Testing single insert");
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+
+assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: "test", coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+// Test that if there are no changes during a subsequent 'getMore', we return an empty batch.
+cursor = cst.getNextBatch(cursor);
+assert.eq(0, cursor.nextBatch.length, "Cursor had changes: " + tojson(cursor));
+
+jsTestLog("Testing second insert");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.insert({_id: 1, a: 2}));
+expected = {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1, a: 2},
+ ns: {db: "test", coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing update");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 0}, {_id: 0, a: 3}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 3},
+ ns: {db: "test", coll: "t1"},
+ operationType: "replace",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing update of another field");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 0}, {_id: 0, b: 3}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, b: 3},
+ ns: {db: "test", coll: "t1"},
+ operationType: "replace",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing upsert");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 2}, {_id: 2, a: 4}, {upsert: true}));
+expected = {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, a: 4},
+ ns: {db: "test", coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing partial update with $inc");
+assert.writeOK(db.t1.insert({_id: 3, a: 5, b: 1}));
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({_id: 3}, {$inc: {b: 2}}));
+expected = {
+ documentKey: {_id: 3},
+ ns: {db: "test", coll: "t1"},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {b: 3}},
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing multi:true update");
+assert.writeOK(db.t1.insert({_id: 4, a: 0, b: 1}));
+assert.writeOK(db.t1.insert({_id: 5, a: 0, b: 1}));
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.update({a: 0}, {$set: {b: 2}}, {multi: true}));
+expected = [
+ {
+ documentKey: {_id: 4},
ns: {db: "test", coll: "t1"},
- operationType: "replace",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing upsert");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 2}, {_id: 2, a: 4}, {upsert: true}));
- expected = {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, a: 4},
- ns: {db: "test", coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing partial update with $inc");
- assert.writeOK(db.t1.insert({_id: 3, a: 5, b: 1}));
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({_id: 3}, {$inc: {b: 2}}));
- expected = {
- documentKey: {_id: 3},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {b: 2}}
+ },
+ {
+ documentKey: {_id: 5},
ns: {db: "test", coll: "t1"},
operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 3}},
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing multi:true update");
- assert.writeOK(db.t1.insert({_id: 4, a: 0, b: 1}));
- assert.writeOK(db.t1.insert({_id: 5, a: 0, b: 1}));
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.update({a: 0}, {$set: {b: 2}}, {multi: true}));
- expected = [
- {
- documentKey: {_id: 4},
- ns: {db: "test", coll: "t1"},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 2}}
- },
- {
- documentKey: {_id: 5},
- ns: {db: "test", coll: "t1"},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {b: 2}}
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- jsTestLog("Testing delete");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.remove({_id: 1}));
- expected = {
- documentKey: {_id: 1},
+ updateDescription: {removedFields: [], updatedFields: {b: 2}}
+ }
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+jsTestLog("Testing delete");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.remove({_id: 1}));
+expected = {
+ documentKey: {_id: 1},
+ ns: {db: "test", coll: "t1"},
+ operationType: "delete",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing justOne:false delete");
+assert.writeOK(db.t1.insert({_id: 6, a: 1, b: 1}));
+assert.writeOK(db.t1.insert({_id: 7, a: 1, b: 1}));
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+assert.writeOK(db.t1.remove({a: 1}, {justOne: false}));
+expected = [
+ {
+ documentKey: {_id: 6},
+ ns: {db: "test", coll: "t1"},
+ operationType: "delete",
+ },
+ {
+ documentKey: {_id: 7},
ns: {db: "test", coll: "t1"},
operationType: "delete",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing justOne:false delete");
- assert.writeOK(db.t1.insert({_id: 6, a: 1, b: 1}));
- assert.writeOK(db.t1.insert({_id: 7, a: 1, b: 1}));
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- assert.writeOK(db.t1.remove({a: 1}, {justOne: false}));
- expected = [
- {
- documentKey: {_id: 6},
- ns: {db: "test", coll: "t1"},
- operationType: "delete",
- },
- {
- documentKey: {_id: 7},
- ns: {db: "test", coll: "t1"},
- operationType: "delete",
- }
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
-
- jsTestLog("Testing intervening write on another collection");
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
- let t2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t2});
- assert.writeOK(db.t2.insert({_id: 100, c: 1}));
- cst.assertNoChange(cursor);
- expected = {
- documentKey: {_id: 100},
- fullDocument: {_id: 100, c: 1},
- ns: {db: "test", coll: "t2"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: t2cursor, expectedChanges: [expected]});
-
- jsTestLog("Testing drop of unrelated collection");
- assert.writeOK(db.dropping.insert({}));
- assertDropCollection(db, db.dropping.getName());
- // Should still see the previous change from t2, shouldn't see anything about 'dropping'.
-
- jsTestLog("Testing insert that looks like rename");
- assertDropCollection(db, "dne1");
- assertDropCollection(db, "dne2");
- const dne1cursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne1});
- const dne2cursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne2});
- assert.writeOK(db.t2.insert({_id: 101, renameCollection: "test.dne1", to: "test.dne2"}));
- cst.assertNoChange(dne1cursor);
- cst.assertNoChange(dne2cursor);
-
- if (!isMongos) {
- jsTestLog("Ensuring attempt to read with legacy operations fails.");
- db.getMongo().forceReadMode('legacy');
- const legacyCursor =
- db.tailable2.aggregate([{$changeStream: {}}], {cursor: {batchSize: 0}});
- assert.throws(function() {
- legacyCursor.next();
- }, [], "Legacy getMore expected to fail on changeStream cursor.");
- db.getMongo().forceReadMode('commands');
}
-
- jsTestLog("Testing resumability");
- assertDropAndRecreateCollection(db, "resume1");
-
- // Note we do not project away 'id.ts' as it is part of the resume token.
- let resumeCursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.resume1});
-
- // Insert a document and save the resulting change stream.
- assert.writeOK(db.resume1.insert({_id: 1}));
- const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
-
- jsTestLog("Testing resume after one document.");
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: db.resume1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
-
- jsTestLog("Inserting additional documents.");
- assert.writeOK(db.resume1.insert({_id: 2}));
- const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
- assert.writeOK(db.resume1.insert({_id: 3}));
- const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
-
- jsTestLog("Testing resume after first document of three.");
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: db.resume1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- jsTestLog("Testing resume after second document of three.");
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
- collection: db.resume1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- cst.cleanUp();
+];
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+
+jsTestLog("Testing intervening write on another collection");
+cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t1});
+let t2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.t2});
+assert.writeOK(db.t2.insert({_id: 100, c: 1}));
+cst.assertNoChange(cursor);
+expected = {
+ documentKey: {_id: 100},
+ fullDocument: {_id: 100, c: 1},
+ ns: {db: "test", coll: "t2"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: t2cursor, expectedChanges: [expected]});
+
+jsTestLog("Testing drop of unrelated collection");
+assert.writeOK(db.dropping.insert({}));
+assertDropCollection(db, db.dropping.getName());
+// Should still see the previous change from t2, shouldn't see anything about 'dropping'.
+
+jsTestLog("Testing insert that looks like rename");
+assertDropCollection(db, "dne1");
+assertDropCollection(db, "dne2");
+const dne1cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne1});
+const dne2cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.dne2});
+assert.writeOK(db.t2.insert({_id: 101, renameCollection: "test.dne1", to: "test.dne2"}));
+cst.assertNoChange(dne1cursor);
+cst.assertNoChange(dne2cursor);
+
+if (!isMongos) {
+ jsTestLog("Ensuring attempt to read with legacy operations fails.");
+ db.getMongo().forceReadMode('legacy');
+ const legacyCursor = db.tailable2.aggregate([{$changeStream: {}}], {cursor: {batchSize: 0}});
+ assert.throws(function() {
+ legacyCursor.next();
+ }, [], "Legacy getMore expected to fail on changeStream cursor.");
+ db.getMongo().forceReadMode('commands');
+}
+
+jsTestLog("Testing resumability");
+assertDropAndRecreateCollection(db, "resume1");
+
+// Note we do not project away 'id.ts' as it is part of the resume token.
+let resumeCursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: db.resume1});
+
+// Insert a document and save the resulting change stream.
+assert.writeOK(db.resume1.insert({_id: 1}));
+const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+
+jsTestLog("Testing resume after one document.");
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: db.resume1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+
+jsTestLog("Inserting additional documents.");
+assert.writeOK(db.resume1.insert({_id: 2}));
+const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.writeOK(db.resume1.insert({_id: 3}));
+const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+
+jsTestLog("Testing resume after first document of three.");
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: db.resume1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+jsTestLog("Testing resume after second document of three.");
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
+ collection: db.resume1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+cst.cleanUp();
}());
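
change_stream.js covers the basic event shapes, and one detail from the hunks above is worth calling out: a replacement-style update emits a "replace" event containing the full document, whereas an operator update ($set, $inc) emits an "update" event carrying only an updateDescription. A short sketch, with an illustrative collection name:

// Minimal sketch: the event shape depends on the style of the update.
const coll = db.updateShapeDemo;
assert.commandWorked(coll.insert({_id: 0, a: 1}));
const stream = coll.watch();

assert.commandWorked(coll.update({_id: 0}, {_id: 0, a: 2}));  // replacement
assert.commandWorked(coll.update({_id: 0}, {$inc: {a: 1}}));  // operator update

assert.soon(() => stream.hasNext());
assert.eq(stream.next().operationType, "replace");

assert.soon(() => stream.hasNext());
const updateEvent = stream.next();
assert.eq(updateEvent.operationType, "update");
assert.docEq(updateEvent.updateDescription, {removedFields: [], updatedFields: {a: 3}});
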
diff --git a/jstests/change_streams/collation.js b/jstests/change_streams/collation.js
index e99f6064b60..3d50b564711 100644
--- a/jstests/change_streams/collation.js
+++ b/jstests/change_streams/collation.js
@@ -3,344 +3,332 @@
* default collation, and uses the simple collation if none is provided.
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest' and
- // 'runCommandChangeStreamPassthroughAware'.
-
- let cst = new ChangeStreamTest(db);
-
- const caseInsensitive = {locale: "en_US", strength: 2};
-
- let caseInsensitiveCollection = "change_stream_case_insensitive";
- assertDropCollection(db, caseInsensitiveCollection);
-
- // Test that you can open a change stream before the collection exists, and it will use the
- // simple collation. Tag this stream as 'doNotModifyInPassthroughs', since only individual
- // collections have the concept of a default collation.
- const simpleCollationStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {}},
- {
- $match:
- {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
- },
- {$project: {docId: "$fullDocument._id"}}
- ],
- collection: caseInsensitiveCollection,
- doNotModifyInPassthroughs: true
- });
-
- // Create the collection with a non-default collation. The stream should continue to use the
- // simple collation.
- caseInsensitiveCollection =
- assertCreateCollection(db, caseInsensitiveCollection, {collation: caseInsensitive});
- assert.commandWorked(
- caseInsensitiveCollection.insert([{_id: "insert_one"}, {_id: "INSERT_TWO"}]));
- cst.assertNextChangesEqual(
- {cursor: simpleCollationStream, expectedChanges: [{docId: "INSERT_TWO"}]});
-
- const caseInsensitivePipeline = [
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest' and
+ // 'runCommandChangeStreamPassthroughAware'.
+
+let cst = new ChangeStreamTest(db);
+
+const caseInsensitive = {
+ locale: "en_US",
+ strength: 2
+};
+
+let caseInsensitiveCollection = "change_stream_case_insensitive";
+assertDropCollection(db, caseInsensitiveCollection);
+
+// Test that you can open a change stream before the collection exists, and it will use the
+// simple collation. Tag this stream as 'doNotModifyInPassthroughs', since only individual
+// collections have the concept of a default collation.
+const simpleCollationStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}},
+ {$project: {docId: "$fullDocument._id"}}
+ ],
+ collection: caseInsensitiveCollection,
+ doNotModifyInPassthroughs: true
+});
+
+// Create the collection with a non-default collation. The stream should continue to use the
+// simple collation.
+caseInsensitiveCollection =
+ assertCreateCollection(db, caseInsensitiveCollection, {collation: caseInsensitive});
+assert.commandWorked(caseInsensitiveCollection.insert([{_id: "insert_one"}, {_id: "INSERT_TWO"}]));
+cst.assertNextChangesEqual(
+ {cursor: simpleCollationStream, expectedChanges: [{docId: "INSERT_TWO"}]});
+
+const caseInsensitivePipeline = [
+ {$changeStream: {}},
+ {$match: {"fullDocument.text": "abc"}},
+ {$project: {docId: "$documentKey._id"}}
+];
+
+// Test that $changeStream will not implicitly adopt the default collation of the collection on
+// which it is run. Tag this stream as 'doNotModifyInPassthroughs'; whole-db and cluster-wide
+// streams do not have default collations.
+const didNotInheritCollationStream = cst.startWatchingChanges({
+ pipeline: caseInsensitivePipeline,
+ collection: caseInsensitiveCollection,
+ doNotModifyInPassthroughs: true
+});
+// Test that a collation can be explicitly specified for the $changeStream. This does not need
+// to be tagged 'doNotModifyInPassthroughs', since whole-db and cluster-wide changeStreams will
+// use an explicit collation if present.
+let explicitCaseInsensitiveStream = cst.startWatchingChanges({
+ pipeline: caseInsensitivePipeline,
+ collection: caseInsensitiveCollection,
+ aggregateOptions: {collation: caseInsensitive}
+});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
+
+// 'didNotInheritCollationStream' should not have inherited the collection's case-insensitive
+// default collation, and should only see the second insert. 'explicitCaseInsensitiveStream'
+// should see both inserts.
+cst.assertNextChangesEqual({cursor: didNotInheritCollationStream, expectedChanges: [{docId: 1}]});
+cst.assertNextChangesEqual(
+ {cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 0}, {docId: 1}]});
+
+// Test that the collation does not apply to the scan over the oplog.
+const similarNameCollection = assertDropAndRecreateCollection(
+ db, "cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe", {collation: {locale: "en_US"}});
+
+// We must recreate the explicitCaseInsensitiveStream and set 'doNotModifyInPassthroughs'. Whole
+// db and cluster-wide streams use the simple collation while scanning the oplog, but they don't
+// filter the oplog by collection name. The subsequent $match stage which we inject into the
+// pipeline to filter for a specific collection will obey the pipeline's case-insensitive
+// collation, meaning that 'cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe' will match
+// 'change_stream_case_insensitive'.
+explicitCaseInsensitiveStream = cst.startWatchingChanges({
+ pipeline: caseInsensitivePipeline,
+ collection: caseInsensitiveCollection,
+ aggregateOptions: {collation: caseInsensitive},
+ doNotModifyInPassthroughs: true
+});
+
+assert.writeOK(similarNameCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(caseInsensitiveCollection.insert({_id: 2, text: "ABC"}));
+
+// The case-insensitive stream should not see the first insert (to the other collection), only
+// the second. We do not expect to see the insert in 'didNotInheritCollationStream'.
+cst.assertNextChangesEqual({cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 2}]});
+
+// Test that creating a collection without a collation does not invalidate any change streams
+// that were opened before the collection existed.
+(function() {
+let noCollationCollection = "change_stream_no_collation";
+assertDropCollection(db, noCollationCollection);
+
+const streamCreatedBeforeNoCollationCollection = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
+ collection: noCollationCollection
+});
+
+noCollationCollection = assertCreateCollection(db, noCollationCollection);
+assert.writeOK(noCollationCollection.insert({_id: 0}));
+
+cst.assertNextChangesEqual(
+ {cursor: streamCreatedBeforeNoCollationCollection, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a collection and explicitly specifying the simple collation does not
+// invalidate any change streams that were opened before the collection existed.
+(function() {
+let simpleCollationCollection = "change_stream_simple_collation";
+assertDropCollection(db, simpleCollationCollection);
+
+const streamCreatedBeforeSimpleCollationCollection = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
+ collection: simpleCollationCollection
+});
+
+simpleCollationCollection =
+ assertCreateCollection(db, simpleCollationCollection, {collation: {locale: "simple"}});
+assert.writeOK(simpleCollationCollection.insert({_id: 0}));
+
+cst.assertNextChangesEqual(
+ {cursor: streamCreatedBeforeSimpleCollationCollection, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a change stream with a non-default collation, then creating a collection
+// with the same collation will not invalidate the change stream.
+(function() {
+let frenchCollection = "change_stream_french_collation";
+assertDropCollection(db, frenchCollection);
+
+const frenchChangeStream = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
+ aggregateOptions: {collation: {locale: "fr"}},
+ collection: frenchCollection
+});
+
+frenchCollection = assertCreateCollection(db, frenchCollection, {collation: {locale: "fr"}});
+assert.writeOK(frenchCollection.insert({_id: 0}));
+
+cst.assertNextChangesEqual({cursor: frenchChangeStream, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a change stream with a non-default collation, then creating a collection
+// with *a different* collation will not invalidate the change stream.
+(function() {
+let germanCollection = "change_stream_german_collation";
+assertDropCollection(db, germanCollection);
+
+const englishCaseInsensitiveStream = cst.startWatchingChanges({
+ pipeline: [
+ {$changeStream: {}},
+ {$match: {"fullDocument.text": "abc"}},
+ {$project: {docId: "$documentKey._id"}}
+ ],
+ aggregateOptions: {collation: caseInsensitive},
+ collection: germanCollection
+});
+
+germanCollection = assertCreateCollection(db, germanCollection, {collation: {locale: "de"}});
+assert.writeOK(germanCollection.insert({_id: 0, text: "aBc"}));
+
+cst.assertNextChangesEqual({cursor: englishCaseInsensitiveStream, expectedChanges: [{docId: 0}]});
+}());
+
+// Test that creating a change stream with a non-default collation against a collection that has
+// a non-simple default collation will use the collation specified on the operation.
+(function() {
+const caseInsensitiveCollection = assertDropAndRecreateCollection(
+ db, "change_stream_case_insensitive", {collation: caseInsensitive});
+
+const englishCaseSensitiveStream = cst.startWatchingChanges({
+ pipeline: [
{$changeStream: {}},
{$match: {"fullDocument.text": "abc"}},
{$project: {docId: "$documentKey._id"}}
- ];
-
- // Test that $changeStream will not implicitly adopt the default collation of the collection on
- // which it is run. Tag this stream as 'doNotModifyInPassthroughs'; whole-db and cluster-wide
- // streams do not have default collations.
- const didNotInheritCollationStream = cst.startWatchingChanges({
- pipeline: caseInsensitivePipeline,
- collection: caseInsensitiveCollection,
- doNotModifyInPassthroughs: true
- });
- // Test that a collation can be explicitly specified for the $changeStream. This does not need
- // to be tagged 'doNotModifyInPassthroughs', since whole-db and cluster-wide changeStreams will
- // use an explicit collation if present.
- let explicitCaseInsensitiveStream = cst.startWatchingChanges({
- pipeline: caseInsensitivePipeline,
- collection: caseInsensitiveCollection,
- aggregateOptions: {collation: caseInsensitive}
- });
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
-
- // 'didNotInheritCollationStream' should not have inherited the collection's case-insensitive
- // default collation, and should only see the second insert. 'explicitCaseInsensitiveStream'
- // should see both inserts.
- cst.assertNextChangesEqual(
- {cursor: didNotInheritCollationStream, expectedChanges: [{docId: 1}]});
- cst.assertNextChangesEqual(
- {cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 0}, {docId: 1}]});
-
- // Test that the collation does not apply to the scan over the oplog.
- const similarNameCollection = assertDropAndRecreateCollection(
- db, "cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe", {collation: {locale: "en_US"}});
-
- // We must recreate the explicitCaseInsensitiveStream and set 'doNotModifyInPassthroughs'. Whole
- // db and cluster-wide streams use the simple collation while scanning the oplog, but they don't
- // filter the oplog by collection name. The subsequent $match stage which we inject into the
- // pipeline to filter for a specific collection will obey the pipeline's case-insensitive
- // collation, meaning that 'cHaNgE_sTrEaM_cAsE_iNsEnSiTiVe' will match
- // 'change_stream_case_insensitive'.
- explicitCaseInsensitiveStream = cst.startWatchingChanges({
- pipeline: caseInsensitivePipeline,
- collection: caseInsensitiveCollection,
- aggregateOptions: {collation: caseInsensitive},
- doNotModifyInPassthroughs: true
- });
-
- assert.writeOK(similarNameCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(caseInsensitiveCollection.insert({_id: 2, text: "ABC"}));
-
- // The case-insensitive stream should not see the first insert (to the other collection), only
- // the second. We do not expect to see the insert in 'didNotInheritCollationStream'.
- cst.assertNextChangesEqual(
- {cursor: explicitCaseInsensitiveStream, expectedChanges: [{docId: 2}]});
-
- // Test that creating a collection without a collation does not invalidate any change streams
- // that were opened before the collection existed.
- (function() {
- let noCollationCollection = "change_stream_no_collation";
- assertDropCollection(db, noCollationCollection);
-
- const streamCreatedBeforeNoCollationCollection = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
- collection: noCollationCollection
- });
-
- noCollationCollection = assertCreateCollection(db, noCollationCollection);
- assert.writeOK(noCollationCollection.insert({_id: 0}));
-
- cst.assertNextChangesEqual(
- {cursor: streamCreatedBeforeNoCollationCollection, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a collection and explicitly specifying the simple collation does not
- // invalidate any change streams that were opened before the collection existed.
- (function() {
- let simpleCollationCollection = "change_stream_simple_collation";
- assertDropCollection(db, simpleCollationCollection);
-
- const streamCreatedBeforeSimpleCollationCollection = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
- collection: simpleCollationCollection
- });
-
- simpleCollationCollection =
- assertCreateCollection(db, simpleCollationCollection, {collation: {locale: "simple"}});
- assert.writeOK(simpleCollationCollection.insert({_id: 0}));
-
- cst.assertNextChangesEqual(
- {cursor: streamCreatedBeforeSimpleCollationCollection, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a change stream with a non-default collation, then creating a collection
- // with the same collation will not invalidate the change stream.
- (function() {
- let frenchCollection = "change_stream_french_collation";
- assertDropCollection(db, frenchCollection);
-
- const frenchChangeStream = cst.startWatchingChanges({
- pipeline: [{$changeStream: {}}, {$project: {docId: "$documentKey._id"}}],
- aggregateOptions: {collation: {locale: "fr"}},
- collection: frenchCollection
- });
-
- frenchCollection =
- assertCreateCollection(db, frenchCollection, {collation: {locale: "fr"}});
- assert.writeOK(frenchCollection.insert({_id: 0}));
-
- cst.assertNextChangesEqual({cursor: frenchChangeStream, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a change stream with a non-default collation, then creating a collection
- // with *a different* collation will not invalidate the change stream.
- (function() {
- let germanCollection = "change_stream_german_collation";
- assertDropCollection(db, germanCollection);
-
- const englishCaseInsensitiveStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {}},
- {$match: {"fullDocument.text": "abc"}},
- {$project: {docId: "$documentKey._id"}}
- ],
- aggregateOptions: {collation: caseInsensitive},
- collection: germanCollection
- });
-
- germanCollection =
- assertCreateCollection(db, germanCollection, {collation: {locale: "de"}});
- assert.writeOK(germanCollection.insert({_id: 0, text: "aBc"}));
-
- cst.assertNextChangesEqual(
- {cursor: englishCaseInsensitiveStream, expectedChanges: [{docId: 0}]});
- }());
-
- // Test that creating a change stream with a non-default collation against a collection that has
- // a non-simple default collation will use the collation specified on the operation.
- (function() {
- const caseInsensitiveCollection = assertDropAndRecreateCollection(
- db, "change_stream_case_insensitive", {collation: caseInsensitive});
-
- const englishCaseSensitiveStream = cst.startWatchingChanges({
- pipeline: [
- {$changeStream: {}},
- {$match: {"fullDocument.text": "abc"}},
- {$project: {docId: "$documentKey._id"}}
- ],
- aggregateOptions: {collation: {locale: "en_US"}},
- collection: caseInsensitiveCollection
- });
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
-
- cst.assertNextChangesEqual(
- {cursor: englishCaseSensitiveStream, expectedChanges: [{docId: 1}]});
- }());
-
- // Test that collation is supported by the shell helper. Test that creating a change stream with
- // a non-default collation against a collection that has a simple default collation will use the
- // collation specified on the operation.
- (function() {
- const noCollationCollection =
- assertDropAndRecreateCollection(db, "change_stream_no_collation");
-
- const cursor = noCollationCollection.watch(
- [{$match: {"fullDocument.text": "abc"}}, {$project: {docId: "$documentKey._id"}}],
- {collation: caseInsensitive});
- assert(!cursor.hasNext());
- assert.writeOK(noCollationCollection.insert({_id: 0, text: "aBc"}));
- assert.writeOK(noCollationCollection.insert({_id: 1, text: "abc"}));
- assert.soon(() => cursor.hasNext());
- assertChangeStreamEventEq(cursor.next(), {docId: 0});
- assert.soon(() => cursor.hasNext());
- assertChangeStreamEventEq(cursor.next(), {docId: 1});
- assert(!cursor.hasNext());
- }());
-
- // Test that we can resume a change stream on a collection that has been dropped without
- // requiring the user to explicitly specify the collation.
- (function() {
- const collName = "change_stream_case_insensitive";
- let caseInsensitiveCollection =
- assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
-
- let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
- {collation: caseInsensitive});
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
-
- assert.soon(() => changeStream.hasNext());
- const next = changeStream.next();
- assert.docEq(next.documentKey, {_id: 0});
- const resumeToken = next._id;
-
- // Insert a second document to see after resuming.
- assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
-
- // Drop the collection to invalidate the stream.
- assertDropCollection(db, collName);
-
- // Test that a $changeStream is allowed to resume on the dropped collection with an explicit
- // collation, even if it doesn't match the original collection's default collation.
- changeStream = caseInsensitiveCollection.watch(
- [{$match: {"fullDocument.text": "ABC"}}],
- {resumeAfter: resumeToken, collation: {locale: "simple"}});
-
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
-
- // Test that a pipeline without an explicit collation is allowed to resume the change stream
- // after the collection has been dropped, and it will use the simple collation. Do not
- // modify this in the passthrough suite(s) since only individual collections have the
- // concept of a default collation.
- const doNotModifyInPassthroughs = true;
- const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
- db,
- {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {},
- },
- doNotModifyInPassthroughs));
-
- changeStream = new DBCommandCursor(db, cmdRes);
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
- }());
-
- // Test that the default collation of a new version of the collection is not applied when
- // resuming a change stream from before a collection drop.
- (function() {
- const collName = "change_stream_case_insensitive";
- let caseInsensitiveCollection =
- assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
-
- let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
- {collation: caseInsensitive});
-
- assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
-
- assert.soon(() => changeStream.hasNext());
- const next = changeStream.next();
- assert.docEq(next.documentKey, {_id: 0});
- const resumeToken = next._id;
-
- // Insert a second document to see after resuming.
- assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
-
- // Recreate the collection with a different collation.
- caseInsensitiveCollection = assertDropAndRecreateCollection(
- db, caseInsensitiveCollection.getName(), {collation: {locale: "simple"}});
- assert.writeOK(caseInsensitiveCollection.insert({_id: "new collection", text: "abc"}));
-
- // Verify that the stream sees the insert before the drop and then is exhausted. We won't
- // see the invalidate because the pipeline has a $match stage after the $changeStream.
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().fullDocument, {_id: "dropped_coll", text: "ABC"});
- // Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
- // 'isExhausted()' to force a getMore since the previous getMore may not include the
- // collection drop, which is more likely with sharded collections on slow machines.
- if (!isChangeStreamPassthrough()) {
- assert.throws(() => changeStream.next());
- }
-
- // Test that a pipeline with an explicit collation is allowed to resume from before the
- // collection is dropped and recreated.
- changeStream =
- caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "ABC"}}],
- {resumeAfter: resumeToken, collation: {locale: "fr"}});
-
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
- // Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
- // 'isExhausted()' to force a getMore since the previous getMore may not include the
- // collection drop, which is more likely with sharded collections on slow machines.
- if (!isChangeStreamPassthrough()) {
- assert.throws(() => changeStream.next());
- }
-
- // Test that a pipeline without an explicit collation is allowed to resume, even though the
- // collection has been recreated with the same default collation as it had previously. Do
- // not modify this command in the passthrough suite(s) since only individual collections
- // have the concept of a default collation.
- const doNotModifyInPassthroughs = true;
- const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
- db,
- {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {}
- },
- doNotModifyInPassthroughs));
-
- changeStream = new DBCommandCursor(db, cmdRes);
- assert.soon(() => changeStream.hasNext());
- assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
- }());
+ ],
+ aggregateOptions: {collation: {locale: "en_US"}},
+ collection: caseInsensitiveCollection
+});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(caseInsensitiveCollection.insert({_id: 1, text: "abc"}));
+
+cst.assertNextChangesEqual({cursor: englishCaseSensitiveStream, expectedChanges: [{docId: 1}]});
+}());
+
+// Test that collation is supported by the shell helper. Test that creating a change stream with
+// a non-default collation against a collection that has a simple default collation will use the
+// collation specified on the operation.
+(function() {
+const noCollationCollection = assertDropAndRecreateCollection(db, "change_stream_no_collation");
+
+const cursor = noCollationCollection.watch(
+ [{$match: {"fullDocument.text": "abc"}}, {$project: {docId: "$documentKey._id"}}],
+ {collation: caseInsensitive});
+assert(!cursor.hasNext());
+assert.writeOK(noCollationCollection.insert({_id: 0, text: "aBc"}));
+assert.writeOK(noCollationCollection.insert({_id: 1, text: "abc"}));
+assert.soon(() => cursor.hasNext());
+assertChangeStreamEventEq(cursor.next(), {docId: 0});
+assert.soon(() => cursor.hasNext());
+assertChangeStreamEventEq(cursor.next(), {docId: 1});
+assert(!cursor.hasNext());
+}());
+
+// Test that we can resume a change stream on a collection that has been dropped without
+// requiring the user to explicitly specify the collation.
+(function() {
+const collName = "change_stream_case_insensitive";
+let caseInsensitiveCollection =
+ assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
+
+let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
+ {collation: caseInsensitive});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
+
+assert.soon(() => changeStream.hasNext());
+const next = changeStream.next();
+assert.docEq(next.documentKey, {_id: 0});
+const resumeToken = next._id;
+
+// Insert a second document to see after resuming.
+assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
+
+// Drop the collection to invalidate the stream.
+assertDropCollection(db, collName);
+
+// Test that a $changeStream is allowed to resume on the dropped collection with an explicit
+// collation, even if it doesn't match the original collection's default collation.
+changeStream =
+ caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "ABC"}}],
+ {resumeAfter: resumeToken, collation: {locale: "simple"}});
+
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+
+// Test that a pipeline without an explicit collation is allowed to resume the change stream
+// after the collection has been dropped, and it will use the simple collation. Do not
+// modify this in the passthrough suite(s) since only individual collections have the
+// concept of a default collation.
+const doNotModifyInPassthroughs = true;
+const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
+ db,
+ {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {},
+ },
+ doNotModifyInPassthroughs));
+
+changeStream = new DBCommandCursor(db, cmdRes);
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+}());
+
+// Test that the default collation of a new version of the collection is not applied when
+// resuming a change stream from before a collection drop.
+(function() {
+const collName = "change_stream_case_insensitive";
+let caseInsensitiveCollection =
+ assertDropAndRecreateCollection(db, collName, {collation: caseInsensitive});
+
+let changeStream = caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "abc"}}],
+ {collation: caseInsensitive});
+
+assert.writeOK(caseInsensitiveCollection.insert({_id: 0, text: "abc"}));
+
+assert.soon(() => changeStream.hasNext());
+const next = changeStream.next();
+assert.docEq(next.documentKey, {_id: 0});
+const resumeToken = next._id;
+
+// Insert a second document to see after resuming.
+assert.writeOK(caseInsensitiveCollection.insert({_id: "dropped_coll", text: "ABC"}));
+
+// Recreate the collection with a different collation.
+caseInsensitiveCollection = assertDropAndRecreateCollection(
+ db, caseInsensitiveCollection.getName(), {collation: {locale: "simple"}});
+assert.writeOK(caseInsensitiveCollection.insert({_id: "new collection", text: "abc"}));
+
+// Verify that the stream sees the insert before the drop and then is exhausted. We won't
+// see the invalidate because the pipeline has a $match stage after the $changeStream.
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().fullDocument, {_id: "dropped_coll", text: "ABC"});
+// Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
+// 'isExhausted()' to force a getMore since the previous getMore may not include the
+// collection drop, which is more likely with sharded collections on slow machines.
+if (!isChangeStreamPassthrough()) {
+ assert.throws(() => changeStream.next());
+}
+
+// Test that a pipeline with an explicit collation is allowed to resume from before the
+// collection is dropped and recreated.
+changeStream =
+ caseInsensitiveCollection.watch([{$match: {"fullDocument.text": "ABC"}}],
+ {resumeAfter: resumeToken, collation: {locale: "fr"}});
+
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+// Only single-collection streams will be exhausted from the drop. Use 'next()' instead of
+// 'isExhausted()' to force a getMore since the previous getMore may not include the
+// collection drop, which is more likely with sharded collections on slow machines.
+if (!isChangeStreamPassthrough()) {
+ assert.throws(() => changeStream.next());
+}
+
+// Test that a pipeline without an explicit collation is allowed to resume, even though the
+// collection has been recreated with the same default collation as it had previously. Do
+// not modify this command in the passthrough suite(s) since only individual collections
+// have the concept of a default collation.
+const doNotModifyInPassthroughs = true;
+const cmdRes = assert.commandWorked(runCommandChangeStreamPassthroughAware(
+ db,
+ {aggregate: collName, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}},
+ doNotModifyInPassthroughs));
+
+changeStream = new DBCommandCursor(db, cmdRes);
+assert.soon(() => changeStream.hasNext());
+assert.docEq(changeStream.next().documentKey, {_id: "dropped_coll"});
+}());
})();
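[Editorial aside: the core pattern these collation tests exercise reduces to the following sketch. 'example_coll' is an illustrative name, not part of the suite: a $changeStream never inherits the collection's default collation, so case-insensitive matching requires an explicit collation on the aggregation itself.]

const caseInsensitive = {locale: "en_US", strength: 2};
const stream = db.example_coll.watch([{$match: {"fullDocument.text": "abc"}}],
                                     {collation: caseInsensitive});
// With the explicit collation, inserts of "aBc", "ABC" and "abc" all match;
// without it, the stream falls back to the simple (case-sensitive) collation.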
diff --git a/jstests/change_streams/does_not_implicitly_create_database.js b/jstests/change_streams/does_not_implicitly_create_database.js
index 052a53585bd..b6ffe0c83a4 100644
--- a/jstests/change_streams/does_not_implicitly_create_database.js
+++ b/jstests/change_streams/does_not_implicitly_create_database.js
@@ -4,78 +4,74 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
+load("jstests/libs/change_stream_util.js"); // For 'ChangeStreamTest'.
- // Ensure that the test DB does not exist.
- const testDB = db.getSiblingDB(jsTestName());
- assert.commandWorked(testDB.dropDatabase());
+// Ensure that the test DB does not exist.
+const testDB = db.getSiblingDB(jsTestName());
+assert.commandWorked(testDB.dropDatabase());
- let dbList = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
- assert.docEq(dbList.databases, []);
+let dbList = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
+assert.docEq(dbList.databases, []);
- const collName = "test";
+const collName = "test";
- // Start a new $changeStream on the non-existent db.
- const cst = new ChangeStreamTest(testDB);
- const changeStreamCursor =
- cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: collName});
+// Start a new $changeStream on the non-existent db.
+const cst = new ChangeStreamTest(testDB);
+const changeStreamCursor =
+ cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: collName});
- // Confirm that a $changeStream cursor has been opened on the namespace.
- assert.gt(changeStreamCursor.id, 0);
+// Confirm that a $changeStream cursor has been opened on the namespace.
+assert.gt(changeStreamCursor.id, 0);
- // Confirm that the database has not been implicitly created.
- dbList = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
- assert.docEq(dbList.databases, []);
+// Confirm that the database has not been implicitly created.
+dbList = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
+assert.docEq(dbList.databases, []);
- // Confirm that a non-$changeStream aggregation on the non-existent database returns an empty
- // cursor.
- const nonCsCmdRes = assert.commandWorked(
- testDB.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}}));
- assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
- assert.eq(nonCsCmdRes.cursor.id, 0);
+// Confirm that a non-$changeStream aggregation on the non-existent database returns an empty
+// cursor.
+const nonCsCmdRes = assert.commandWorked(
+ testDB.runCommand({aggregate: collName, pipeline: [{$match: {}}], cursor: {}}));
+assert.docEq(nonCsCmdRes.cursor.firstBatch, []);
+assert.eq(nonCsCmdRes.cursor.id, 0);
- // Now perform some writes into the collection...
- assert.commandWorked(testDB[collName].insert({_id: 1}));
- assert.commandWorked(testDB[collName].insert({_id: 2}));
- assert.commandWorked(testDB[collName].update({_id: 1}, {$set: {updated: true}}));
- assert.commandWorked(testDB[collName].remove({_id: 2}));
+// Now perform some writes into the collection...
+assert.commandWorked(testDB[collName].insert({_id: 1}));
+assert.commandWorked(testDB[collName].insert({_id: 2}));
+assert.commandWorked(testDB[collName].update({_id: 1}, {$set: {updated: true}}));
+assert.commandWorked(testDB[collName].remove({_id: 2}));
- // ... confirm that the database has been created...
- dbList = assert.commandWorked(
- db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
- assert.docEq(dbList.databases, [{name: testDB.getName()}]);
+// ... confirm that the database has been created...
+dbList = assert.commandWorked(
+ db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: testDB.getName()}}));
+assert.docEq(dbList.databases, [{name: testDB.getName()}]);
- // ... and verify that the changes are observed by the stream.
- const expectedChanges = [
- {
- documentKey: {_id: 1},
- fullDocument: {_id: 1},
- ns: {db: testDB.getName(), coll: collName},
- operationType: "insert"
- },
- {
- documentKey: {_id: 2},
- fullDocument: {_id: 2},
- ns: {db: testDB.getName(), coll: collName},
- operationType: "insert"
- },
- {
- documentKey: {_id: 1},
- ns: {db: testDB.getName(), coll: collName},
- updateDescription: {removedFields: [], updatedFields: {updated: true}},
- operationType: "update"
- },
- {
- documentKey: {_id: 2},
- ns: {db: testDB.getName(), coll: collName},
- operationType: "delete"
- },
- ];
+// ... and verify that the changes are observed by the stream.
+const expectedChanges = [
+ {
+ documentKey: {_id: 1},
+ fullDocument: {_id: 1},
+ ns: {db: testDB.getName(), coll: collName},
+ operationType: "insert"
+ },
+ {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2},
+ ns: {db: testDB.getName(), coll: collName},
+ operationType: "insert"
+ },
+ {
+ documentKey: {_id: 1},
+ ns: {db: testDB.getName(), coll: collName},
+ updateDescription: {removedFields: [], updatedFields: {updated: true}},
+ operationType: "update"
+ },
+ {documentKey: {_id: 2}, ns: {db: testDB.getName(), coll: collName}, operationType: "delete"},
+];
- cst.assertNextChangesEqual({cursor: changeStreamCursor, expectedChanges: expectedChanges});
- cst.cleanUp();
+cst.assertNextChangesEqual({cursor: changeStreamCursor, expectedChanges: expectedChanges});
+cst.cleanUp();
 })();
\ No newline at end of file
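[Editorial aside: the existence check this test repeats can be factored into a small helper, sketched below. The helper name is illustrative and uses the same 'listDatabases' filter the test relies on.]

// Sketch: report whether a database currently exists.
function databaseExists(db, dbName) {
    const res = assert.commandWorked(
        db.adminCommand({listDatabases: 1, nameOnly: true, filter: {name: dbName}}));
    return res.databases.length > 0;
}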
diff --git a/jstests/change_streams/error_label.js b/jstests/change_streams/error_label.js
index 1c9a00db356..93a8e569d25 100644
--- a/jstests/change_streams/error_label.js
+++ b/jstests/change_streams/error_label.js
@@ -4,30 +4,30 @@
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- // Drop and recreate the collections to be used in this set of tests.
- const coll = assertDropAndRecreateCollection(db, "change_stream_error_label");
+// Drop and recreate the collections to be used in this set of tests.
+const coll = assertDropAndRecreateCollection(db, "change_stream_error_label");
- // Attaching a projection to the Change Stream that filters out the resume token (stored in the
- // _id field) guarantees a ChangeStreamFatalError as soon as we get the first change.
- const changeStream = coll.watch([{$project: {_id: 0}}], {batchSize: 1});
- assert.commandWorked(coll.insert({a: 1}));
+// Attaching a projection to the Change Stream that filters out the resume token (stored in the
+// _id field) guarantees a ChangeStreamFatalError as soon as we get the first change.
+const changeStream = coll.watch([{$project: {_id: 0}}], {batchSize: 1});
+assert.commandWorked(coll.insert({a: 1}));
- const err = assert.throws(function() {
- // Call hasNext() until it throws an error or unexpectedly returns true. We need the
- // assert.soon() to keep trying here, because the above insert command isn't immediately
- // observable to the change stream in sharded configurations.
- assert.soon(function() {
- return changeStream.hasNext();
- });
+const err = assert.throws(function() {
+ // Call hasNext() until it throws an error or unexpectedly returns true. We need the
+ // assert.soon() to keep trying here, because the above insert command isn't immediately
+ // observable to the change stream in sharded configurations.
+ assert.soon(function() {
+ return changeStream.hasNext();
});
+});
- // The hasNext() sends a getMore command, which should generate a ChangeStreamFatalError reply
- // that includes the NonResumableChangeStreamError errorLabel.
- assert.commandFailedWithCode(err, ErrorCodes.ChangeStreamFatalError);
- assert("errorLabels" in err, err);
- assert.contains("NonResumableChangeStreamError", err.errorLabels, err);
+// The hasNext() call sends a getMore command, which should generate a ChangeStreamFatalError reply
+// that includes the NonResumableChangeStreamError errorLabel.
+assert.commandFailedWithCode(err, ErrorCodes.ChangeStreamFatalError);
+assert("errorLabels" in err, err);
+assert.contains("NonResumableChangeStreamError", err.errorLabels, err);
 }());
\ No newline at end of file
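[Editorial aside: driver-style handling of the label asserted above might look like the following sketch; the changeStream variable is illustrative and errors from hasNext() propagate out of assert.soon().]

// Sketch: distinguish resumable failures from fatal ones via error labels.
try {
    assert.soon(() => changeStream.hasNext());
} catch (err) {
    if (err.errorLabels && err.errorLabels.includes("NonResumableChangeStreamError")) {
        // The server says this stream can never be resumed; do not retry.
        throw err;
    }
    // Other errors may be transient and eligible for a resume attempt.
}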
diff --git a/jstests/change_streams/include_cluster_time.js b/jstests/change_streams/include_cluster_time.js
index d035a92f517..dcefe40d062 100644
--- a/jstests/change_streams/include_cluster_time.js
+++ b/jstests/change_streams/include_cluster_time.js
@@ -5,58 +5,56 @@
// based on the commit oplog entry, which would cause this test to fail.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
- load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
+load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
+load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
- // Drop and recreate the collections to be used in this set of tests.
- const coll = assertDropAndRecreateCollection(db, "include_cluster_time");
+// Drop and recreate the collections to be used in this set of tests.
+const coll = assertDropAndRecreateCollection(db, "include_cluster_time");
- const changeStream = coll.watch();
+const changeStream = coll.watch();
- const insertClusterTime =
- assert.commandWorked(coll.runCommand("insert", {documents: [{_id: 0}]})).operationTime;
+const insertClusterTime =
+ assert.commandWorked(coll.runCommand("insert", {documents: [{_id: 0}]})).operationTime;
- const updateClusterTime =
- assert
- .commandWorked(
- coll.runCommand("update", {updates: [{q: {_id: 0}, u: {$set: {updated: true}}}]}))
- .operationTime;
+const updateClusterTime = assert
+ .commandWorked(coll.runCommand(
+ "update", {updates: [{q: {_id: 0}, u: {$set: {updated: true}}}]}))
+ .operationTime;
- const deleteClusterTime =
- assert.commandWorked(coll.runCommand("delete", {deletes: [{q: {_id: 0}, limit: 1}]}))
- .operationTime;
+const deleteClusterTime =
+ assert.commandWorked(coll.runCommand("delete", {deletes: [{q: {_id: 0}, limit: 1}]}))
+ .operationTime;
- const dropClusterTime =
- assert.commandWorked(db.runCommand({drop: coll.getName()})).operationTime;
+const dropClusterTime = assert.commandWorked(db.runCommand({drop: coll.getName()})).operationTime;
- // Make sure each operation has a reasonable cluster time. Note that we should not assert
- // that the cluster times are equal, because the cluster time returned from the command is
- // generated by a second, independent read of the logical clock than the one used to
- // generate the oplog entry. It's possible that the system did something to advance the time
- // between the two reads of the clock.
- assert.soon(() => changeStream.hasNext());
- let next = changeStream.next();
- assert.eq(next.operationType, "insert");
- assert.lte(next.clusterTime, insertClusterTime);
+// Make sure each operation has a reasonable cluster time. Note that we should not assert
+// that the cluster times are equal, because the cluster time returned from the command is
+// generated by a separate read of the logical clock from the one used to
+// generate the oplog entry. It's possible that the system did something to advance the time
+// between the two reads of the clock.
+assert.soon(() => changeStream.hasNext());
+let next = changeStream.next();
+assert.eq(next.operationType, "insert");
+assert.lte(next.clusterTime, insertClusterTime);
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update");
- assert.lte(next.clusterTime, updateClusterTime);
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update");
+assert.lte(next.clusterTime, updateClusterTime);
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "delete");
- assert.lte(next.clusterTime, deleteClusterTime);
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "delete");
+assert.lte(next.clusterTime, deleteClusterTime);
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "drop");
- assert.lte(next.clusterTime, dropClusterTime);
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "drop");
+assert.lte(next.clusterTime, dropClusterTime);
- assertInvalidateOp({cursor: changeStream, opType: "drop"});
+assertInvalidateOp({cursor: changeStream, opType: "drop"});
- changeStream.close();
+changeStream.close();
}());
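[Editorial aside: the ordering property this test asserts condenses to the sketch below; 'coll' and the inserted document are illustrative.]

// Sketch: the event's clusterTime comes from the oplog entry, so it is at most
// the operationTime read back from the corresponding command reply.
const stream = coll.watch();
const opTime =
    assert.commandWorked(coll.runCommand("insert", {documents: [{_id: "x"}]})).operationTime;
assert.soon(() => stream.hasNext());
assert.lte(stream.next().clusterTime, opTime);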
diff --git a/jstests/change_streams/lookup_post_image.js b/jstests/change_streams/lookup_post_image.js
index be267f5feea..fa2658ed6f8 100644
--- a/jstests/change_streams/lookup_post_image.js
+++ b/jstests/change_streams/lookup_post_image.js
@@ -6,244 +6,241 @@
// uses_multiple_connections,
// ]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js");
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
-
- const coll = assertDropAndRecreateCollection(db, "change_post_image");
- const cst = new ChangeStreamTest(db);
-
- jsTestLog("Testing change streams without 'fullDocument' specified");
- // Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for
- // an insert.
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: coll});
- assert.writeOK(coll.insert({_id: "fullDocument not specified"}));
- let latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified"});
-
- // Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for a
- // replacement-style update.
- assert.writeOK(coll.update({_id: "fullDocument not specified"},
- {_id: "fullDocument not specified", replaced: true}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "replace");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified", replaced: true});
-
- // Test that not specifying 'fullDocument' does not include a 'fullDocument' in the result
- // for a non-replacement update.
- assert.writeOK(coll.update({_id: "fullDocument not specified"}, {$set: {updated: true}}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(!latestChange.hasOwnProperty("fullDocument"));
-
- jsTestLog("Testing change streams with 'fullDocument' specified as 'default'");
-
- // Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
- // result for an insert.
- cursor = cst.startWatchingChanges(
- {collection: coll, pipeline: [{$changeStream: {fullDocument: "default"}}]});
- assert.writeOK(coll.insert({_id: "fullDocument is default"}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is default"});
-
- // Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
- // result for a replacement-style update.
- assert.writeOK(coll.update({_id: "fullDocument is default"},
- {_id: "fullDocument is default", replaced: true}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "replace");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is default", replaced: true});
-
- // Test that specifying 'fullDocument' as 'default' does not include a 'fullDocument' in the
- // result for a non-replacement update.
- assert.writeOK(coll.update({_id: "fullDocument is default"}, {$set: {updated: true}}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(!latestChange.hasOwnProperty("fullDocument"));
-
- jsTestLog("Testing change streams with 'fullDocument' specified as 'updateLookup'");
-
- // Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
- // the result for an insert.
- cursor = cst.startWatchingChanges(
- {collection: coll, pipeline: [{$changeStream: {fullDocument: "updateLookup"}}]});
- assert.writeOK(coll.insert({_id: "fullDocument is lookup"}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup"});
-
- // Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
- // the result for a replacement-style update.
- assert.writeOK(coll.update({_id: "fullDocument is lookup"},
- {_id: "fullDocument is lookup", replaced: true}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "replace");
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup", replaced: true});
-
- // Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
- // the result for a non-replacement update.
- assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updated: true}}));
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert.eq(latestChange.fullDocument,
- {_id: "fullDocument is lookup", replaced: true, updated: true});
-
- // Test that looking up the post image of an update after deleting the document will result
- // in a 'fullDocument' with a value of null.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {fullDocument: "updateLookup"}},
- {$match: {operationType: "update"}}
- ]
- });
- assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updatedAgain: true}}));
- assert.writeOK(coll.remove({_id: "fullDocument is lookup"}));
- // If this test is running with secondary read preference, it's necessary for the remove
- // to propagate to all secondary nodes and be available for majority reads before we can
- // assume looking up the document will fail.
- FixtureHelpers.awaitLastOpCommitted(db);
-
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
- const deleteDocResumePoint = latestChange._id;
-
- // Test that looking up the post image of an update after the collection has been dropped
- // will result in 'fullDocument' with a value of null. This must be done using getMore
- // because new cursors cannot be established after a collection drop.
- assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
- assert.writeOK(coll.update({_id: "fullDocument is lookup 2"}, {$set: {updated: true}}));
-
- // Open a $changeStream cursor with batchSize 0, so that no oplog entries are retrieved yet.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
- {$match: {operationType: {$ne: "delete"}}}
- ],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
-
- // Save another stream to test post-image lookup after the collection is recreated.
- const cursorBeforeDrop = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
- {$match: {operationType: {$ne: "delete"}}}
- ],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
-
- // Retrieve the 'insert' operation from the latter stream. This is necessary on a sharded
- // collection so that the documentKey is retrieved before the collection is recreated;
- // otherwise, per SERVER-31691, a uassert will occur.
- latestChange = cst.getOneChange(cursorBeforeDrop);
- assert.eq(latestChange.operationType, "insert");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
-
- // Drop the collection and wait until two-phase drop finishes.
- assertDropCollection(db, coll.getName());
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
- });
- // If this test is running with secondary read preference, it's necessary for the drop
- // to propagate to all secondary nodes and be available for majority reads before we can
- // assume looking up the document will fail.
- FixtureHelpers.awaitLastOpCommitted(db);
-
- // Check the next $changeStream entry; this is the test document inserted above.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
-
- // The next entry is the 'update' operation. Because the collection has been dropped, our
- // attempt to look up the post-image results in a null document.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
-
- // Test that we can resume a change stream with 'fullDocument: updateLookup' after the
- // collection has been dropped. This is only allowed if an explicit collation is provided.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [
- {$changeStream: {resumeAfter: deleteDocResumePoint, fullDocument: "updateLookup"}},
- {$match: {operationType: {$ne: "delete"}}}
- ],
- aggregateOptions: {cursor: {batchSize: 0}, collation: {locale: "simple"}}
- });
-
- // Check the next $changeStream entry; this is the test document inserted above.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
-
- // The next entry is the 'update' operation. Because the collection has been dropped, our
- // attempt to look up the post-image results in a null document.
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
-
- // Test that looking up the post image of an update after the collection has been dropped
- // and created again will result in 'fullDocument' with a value of null. This must be done
- // using getMore because new cursors cannot be established after a collection drop.
-
- // Insert a document with the same _id, verify the change stream won't return it due to
- // different UUID.
- assertCreateCollection(db, coll.getName());
- assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
-
- // Confirm that the next entry's post-image is null since new collection has a different
- // UUID.
- latestChange = cst.getOneChange(cursorBeforeDrop);
- assert.eq(latestChange.operationType, "update");
- assert(latestChange.hasOwnProperty("fullDocument"));
- assert.eq(latestChange.fullDocument, null);
-
- jsTestLog("Testing full document lookup with a real getMore");
- assert.writeOK(coll.insert({_id: "getMoreEnabled"}));
-
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
- });
- assert.writeOK(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
-
- const doc = cst.getOneChange(cursor);
- assert.docEq(doc["fullDocument"], {_id: "getMoreEnabled", updated: true});
-
- // Test that invalidate entries don't have 'fullDocument' even if 'updateLookup' is
- // specified.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- assert.writeOK(coll.insert({_id: "testing invalidate"}));
- assertDropCollection(db, coll.getName());
- // Wait until two-phase drop finishes.
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
- });
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "insert");
- latestChange = cst.getOneChange(cursor);
- assert.eq(latestChange.operationType, "drop");
- // Only single-collection change streams will be invalidated by the drop.
- if (!isChangeStreamPassthrough()) {
- latestChange = cst.getOneChange(cursor, true);
- assert.eq(latestChange.operationType, "invalidate");
- }
+"use strict";
+
+load("jstests/libs/change_stream_util.js");
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/replsets/libs/two_phase_drops.js"); // For 'TwoPhaseDropCollectionTest'.
+
+const coll = assertDropAndRecreateCollection(db, "change_post_image");
+const cst = new ChangeStreamTest(db);
+
+jsTestLog("Testing change streams without 'fullDocument' specified");
+// Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for
+// an insert.
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: coll});
+assert.writeOK(coll.insert({_id: "fullDocument not specified"}));
+let latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified"});
+
+// Test that not specifying 'fullDocument' does include a 'fullDocument' in the result for a
+// replacement-style update.
+assert.writeOK(coll.update({_id: "fullDocument not specified"},
+ {_id: "fullDocument not specified", replaced: true}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "replace");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument not specified", replaced: true});
+
+// Test that not specifying 'fullDocument' does not include a 'fullDocument' in the result
+// for a non-replacement update.
+assert.writeOK(coll.update({_id: "fullDocument not specified"}, {$set: {updated: true}}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(!latestChange.hasOwnProperty("fullDocument"));
+
+jsTestLog("Testing change streams with 'fullDocument' specified as 'default'");
+
+// Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
+// result for an insert.
+cursor = cst.startWatchingChanges(
+ {collection: coll, pipeline: [{$changeStream: {fullDocument: "default"}}]});
+assert.writeOK(coll.insert({_id: "fullDocument is default"}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is default"});
+
+// Test that specifying 'fullDocument' as 'default' does include a 'fullDocument' in the
+// result for a replacement-style update.
+assert.writeOK(coll.update({_id: "fullDocument is default"},
+ {_id: "fullDocument is default", replaced: true}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "replace");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is default", replaced: true});
+
+// Test that specifying 'fullDocument' as 'default' does not include a 'fullDocument' in the
+// result for a non-replacement update.
+assert.writeOK(coll.update({_id: "fullDocument is default"}, {$set: {updated: true}}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(!latestChange.hasOwnProperty("fullDocument"));
+
+jsTestLog("Testing change streams with 'fullDocument' specified as 'updateLookup'");
+
+// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
+// the result for an insert.
+cursor = cst.startWatchingChanges(
+ {collection: coll, pipeline: [{$changeStream: {fullDocument: "updateLookup"}}]});
+assert.writeOK(coll.insert({_id: "fullDocument is lookup"}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup"});
+
+// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
+// the result for a replacement-style update.
+assert.writeOK(
+ coll.update({_id: "fullDocument is lookup"}, {_id: "fullDocument is lookup", replaced: true}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "replace");
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup", replaced: true});
+
+// Test that specifying 'fullDocument' as 'updateLookup' does include a 'fullDocument' in
+// the result for a non-replacement update.
+assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updated: true}}));
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert.eq(latestChange.fullDocument,
+ {_id: "fullDocument is lookup", replaced: true, updated: true});
+
+// Test that looking up the post image of an update after deleting the document will result
+// in a 'fullDocument' with a value of null.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {fullDocument: "updateLookup"}}, {$match: {operationType: "update"}}]
+});
+assert.writeOK(coll.update({_id: "fullDocument is lookup"}, {$set: {updatedAgain: true}}));
+assert.writeOK(coll.remove({_id: "fullDocument is lookup"}));
+// If this test is running with secondary read preference, it's necessary for the remove
+// to propagate to all secondary nodes and be available for majority reads before we can
+// assume looking up the document will fail.
+FixtureHelpers.awaitLastOpCommitted(db);
+
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+const deleteDocResumePoint = latestChange._id;
+
+// Test that looking up the post image of an update after the collection has been dropped
+// will result in 'fullDocument' with a value of null. This must be done using getMore
+// because new cursors cannot be established after a collection drop.
+assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
+assert.writeOK(coll.update({_id: "fullDocument is lookup 2"}, {$set: {updated: true}}));
+
+// Open a $changeStream cursor with batchSize 0, so that no oplog entries are retrieved yet.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [
+ {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
+ {$match: {operationType: {$ne: "delete"}}}
+ ],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+
+// Save another stream to test post-image lookup after the collection is recreated.
+const cursorBeforeDrop = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [
+ {$changeStream: {fullDocument: "updateLookup", resumeAfter: deleteDocResumePoint}},
+ {$match: {operationType: {$ne: "delete"}}}
+ ],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+
+// Retrieve the 'insert' operation from the latter stream. This is necessary on a sharded
+// collection so that the documentKey is retrieved before the collection is recreated;
+// otherwise, per SERVER-31691, a uassert will occur.
+latestChange = cst.getOneChange(cursorBeforeDrop);
+assert.eq(latestChange.operationType, "insert");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
+
+// Drop the collection and wait until two-phase drop finishes.
+assertDropCollection(db, coll.getName());
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
+});
+// If this test is running with secondary read preference, it's necessary for the drop
+// to propagate to all secondary nodes and be available for majority reads before we can
+// assume looking up the document will fail.
+FixtureHelpers.awaitLastOpCommitted(db);
+
+// Check the next $changeStream entry; this is the test document inserted above.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
+
+// The next entry is the 'update' operation. Because the collection has been dropped, our
+// attempt to look up the post-image results in a null document.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+
+// Test that we can resume a change stream with 'fullDocument: updateLookup' after the
+// collection has been dropped. This is only allowed if an explicit collation is provided.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [
+ {$changeStream: {resumeAfter: deleteDocResumePoint, fullDocument: "updateLookup"}},
+ {$match: {operationType: {$ne: "delete"}}}
+ ],
+ aggregateOptions: {cursor: {batchSize: 0}, collation: {locale: "simple"}}
+});
+
+// Check the next $changeStream entry; this is the test document inserted above.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, {_id: "fullDocument is lookup 2"});
+
+// The next entry is the 'update' operation. Because the collection has been dropped, our
+// attempt to look up the post-image results in a null document.
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+
+// Test that looking up the post-image of an update after the collection has been dropped
+// and created again will result in 'fullDocument' with a value of null. This must be done
+// using getMore because new cursors cannot be established after a collection drop.
+
+// Insert a document with the same _id, and verify that the change stream won't return it
+// due to the different UUID.
+assertCreateCollection(db, coll.getName());
+assert.writeOK(coll.insert({_id: "fullDocument is lookup 2"}));
+
+// Confirm that the next entry's post-image is null, since the new collection has a
+// different UUID.
+latestChange = cst.getOneChange(cursorBeforeDrop);
+assert.eq(latestChange.operationType, "update");
+assert(latestChange.hasOwnProperty("fullDocument"));
+assert.eq(latestChange.fullDocument, null);
+
+jsTestLog("Testing full document lookup with a real getMore");
+assert.writeOK(coll.insert({_id: "getMoreEnabled"}));
+
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
+});
+assert.writeOK(coll.update({_id: "getMoreEnabled"}, {$set: {updated: true}}));
+
+const doc = cst.getOneChange(cursor);
+assert.docEq(doc["fullDocument"], {_id: "getMoreEnabled", updated: true});
+
+// Test that invalidate entries don't have 'fullDocument' even if 'updateLookup' is
+// specified.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {fullDocument: "updateLookup"}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+assert.writeOK(coll.insert({_id: "testing invalidate"}));
+assertDropCollection(db, coll.getName());
+// Wait until two-phase drop finishes.
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(db, coll.getName());
+});
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "insert");
+latestChange = cst.getOneChange(cursor);
+assert.eq(latestChange.operationType, "drop");
+// Only single-collection change streams will be invalidated by the drop.
+if (!isChangeStreamPassthrough()) {
+ latestChange = cst.getOneChange(cursor, true);
+ assert.eq(latestChange.operationType, "invalidate");
+}
}());
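For context, the 'updateLookup' semantics exercised above can be reproduced outside the test harness. The sketch below is illustrative only and not part of this commit; the database, collection, and variable names are invented for the example.

// Minimal sketch: observing post-images with {fullDocument: "updateLookup"}.
const demoColl = db.getSiblingDB("lookupDemo").docs;  // illustrative namespace
demoColl.drop();
assert.commandWorked(demoColl.insert({_id: 1, a: 0}));
const demoStream = demoColl.watch([], {fullDocument: "updateLookup"});
assert.commandWorked(demoColl.update({_id: 1}, {$set: {a: 1}}));
assert.soon(() => demoStream.hasNext());
const demoEvent = demoStream.next();
assert.eq(demoEvent.operationType, "update");
// The post-image is looked up at read time, so it reflects the document's
// current state and is null if the document has since been deleted.
printjson(demoEvent.fullDocument);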
diff --git a/jstests/change_streams/metadata_notifications.js b/jstests/change_streams/metadata_notifications.js
index 4d1f29abf2a..8b3aae094fe 100644
--- a/jstests/change_streams/metadata_notifications.js
+++ b/jstests/change_streams/metadata_notifications.js
@@ -3,121 +3,198 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js");
- load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For isSharded.
-
- db = db.getSiblingDB(jsTestName());
- let cst = new ChangeStreamTest(db);
-
- db.getMongo().forceReadMode('commands');
-
- // Test that it is possible to open a new change stream cursor on a collection that does not
- // exist.
- const collName = "test";
- assertDropCollection(db, collName);
-
- // Asserts that resuming a change stream with 'spec' and an explicit simple collation returns
- // the results specified by 'expected'.
- function assertResumeExpected({coll, spec, expected}) {
- const cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: spec}],
- aggregateOptions: {collation: {locale: "simple"}}
- });
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- }
-
- // Cursor creation succeeds, but there are no results. We do not expect to see a notification
- // for collection creation.
- let cursor = cst.startWatchingChanges(
- {collection: collName, pipeline: [{$changeStream: {}}, {$project: {operationType: 1}}]});
-
- // We explicitly test getMore, to ensure that the getMore command for a non-existent collection
- // does not return an error.
- let change = cst.getNextBatch(cursor);
- assert.neq(change.id, 0);
- assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
-
- // Dropping the empty database should not generate any notification for the change stream, since
- // the collection does not exist yet.
- assert.commandWorked(db.dropDatabase());
- change = cst.getNextBatch(cursor);
- assert.neq(change.id, 0);
- assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
-
- // After collection creation, we expect to see oplog entries for each subsequent operation.
- let coll = assertCreateCollection(db, collName);
- assert.writeOK(coll.insert({_id: 0}));
-
- // Determine the number of shards that the collection is distributed across.
- const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
-
- change = cst.getOneChange(cursor);
- assert.eq(change.operationType, "insert", tojson(change));
-
- // Create oplog entries of type insert, update, delete, and drop.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
- assertDropCollection(db, coll.getName());
-
- // We should get oplog entries of type insert, update, delete, drop, and invalidate. The cursor
- // should be closed.
- let expectedChanges = [
- {operationType: "insert"},
- {operationType: "update"},
- {operationType: "delete"},
- {operationType: "drop"},
- {operationType: "invalidate"},
- ];
- let changes = cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
- const resumeToken = changes[0]._id;
- const resumeTokenDrop = changes[3]._id;
- const resumeTokenInvalidate = changes[4]._id;
-
- // Verify we can startAfter the invalidate. We should see one drop event for every other shard
- // that the collection was present on, or nothing if the collection was not sharded. This test
- // exercises the bug described in SERVER-41196.
- const restartedStream = coll.watch([], {startAfter: resumeTokenInvalidate});
- for (let i = 0; i < numShards - 1; ++i) {
- assert.soon(() => restartedStream.hasNext());
- const nextEvent = restartedStream.next();
- assert.eq(nextEvent.operationType, "drop", () => tojson(nextEvent));
- }
- assert(!restartedStream.hasNext(), () => tojson(restartedStream.next()));
-
- // Verify that we can resume a stream after a collection drop without an explicit collation.
- assert.commandWorked(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
- cursor: {}
- }));
-
- // Recreate the collection.
- coll = assertCreateCollection(db, collName);
- assert.writeOK(coll.insert({_id: "after recreate"}));
-
- // Test resuming the change stream from the collection drop using 'resumeAfter'. If running in a
- // sharded passthrough suite, resuming from the drop will first return the drop from the other
- // shard before returning an invalidate.
- cursor = cst.startWatchingChanges({
+"use strict";
+
+load("jstests/libs/change_stream_util.js");
+load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For isSharded.
+
+db = db.getSiblingDB(jsTestName());
+let cst = new ChangeStreamTest(db);
+
+db.getMongo().forceReadMode('commands');
+
+// Test that it is possible to open a new change stream cursor on a collection that does not
+// exist.
+const collName = "test";
+assertDropCollection(db, collName);
+
+// Asserts that resuming a change stream with 'spec' and an explicit simple collation returns
+// the results specified by 'expected'.
+function assertResumeExpected({coll, spec, expected}) {
+ const cursor = cst.startWatchingChanges({
collection: coll,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenDrop}}],
- aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
- });
- cst.consumeDropUpTo({
- cursor: cursor,
- dropType: "drop",
- expectedNext: {operationType: "invalidate"},
- expectInvalidate: true
+ pipeline: [{$changeStream: spec}],
+ aggregateOptions: {collation: {locale: "simple"}}
});
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+}
+
+// Cursor creation succeeds, but there are no results. We do not expect to see a notification
+// for collection creation.
+let cursor = cst.startWatchingChanges(
+ {collection: collName, pipeline: [{$changeStream: {}}, {$project: {operationType: 1}}]});
+
+// We explicitly test getMore, to ensure that the getMore command for a non-existent collection
+// does not return an error.
+let change = cst.getNextBatch(cursor);
+assert.neq(change.id, 0);
+assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
+
+// Dropping the empty database should not generate any notification for the change stream, since
+// the collection does not exist yet.
+assert.commandWorked(db.dropDatabase());
+change = cst.getNextBatch(cursor);
+assert.neq(change.id, 0);
+assert.eq(change.nextBatch.length, 0, tojson(change.nextBatch));
+
+// After collection creation, we expect to see oplog entries for each subsequent operation.
+let coll = assertCreateCollection(db, collName);
+assert.writeOK(coll.insert({_id: 0}));
+
+// Determine the number of shards that the collection is distributed across.
+const numShards = FixtureHelpers.numberOfShardsForCollection(coll);
+
+change = cst.getOneChange(cursor);
+assert.eq(change.operationType, "insert", tojson(change));
+
+// Create oplog entries of type insert, update, delete, and drop.
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.writeOK(coll.remove({_id: 1}));
+assertDropCollection(db, coll.getName());
+
+// We should get oplog entries of type insert, update, delete, drop, and invalidate. The cursor
+// should be closed.
+let expectedChanges = [
+ {operationType: "insert"},
+ {operationType: "update"},
+ {operationType: "delete"},
+ {operationType: "drop"},
+ {operationType: "invalidate"},
+];
+let changes = cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
+const resumeToken = changes[0]._id;
+const resumeTokenDrop = changes[3]._id;
+const resumeTokenInvalidate = changes[4]._id;
+
+// Verify we can startAfter the invalidate. We should see one drop event for every other shard
+// that the collection was present on, or nothing if the collection was not sharded. This test
+// exercises the bug described in SERVER-41196.
+const restartedStream = coll.watch([], {startAfter: resumeTokenInvalidate});
+for (let i = 0; i < numShards - 1; ++i) {
+ assert.soon(() => restartedStream.hasNext());
+ const nextEvent = restartedStream.next();
+ assert.eq(nextEvent.operationType, "drop", () => tojson(nextEvent));
+}
+assert(!restartedStream.hasNext(), () => tojson(restartedStream.next()));
+
+// Verify that we can resume a stream after a collection drop without an explicit collation.
+assert.commandWorked(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeToken}}],
+ cursor: {}
+}));
+
+// Recreate the collection.
+coll = assertCreateCollection(db, collName);
+assert.writeOK(coll.insert({_id: "after recreate"}));
+
+// Test resuming the change stream from the collection drop using 'resumeAfter'. If running in a
+// sharded passthrough suite, resuming from the drop will first return the drop from the other
+// shard before returning an invalidate.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenDrop}}],
+ aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: cursor,
+ dropType: "drop",
+ expectedNext: {operationType: "invalidate"},
+ expectInvalidate: true
+});
+
+// Test resuming the change stream from the invalidate after the drop using 'resumeAfter'.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
+ cursor: {},
+ collation: {locale: "simple"},
+}),
+ ErrorCodes.InvalidResumeToken);
+
+// Test resuming the change stream from the collection drop using 'startAfter'.
+assertResumeExpected({
+ coll: coll.getName(),
+ spec: {startAfter: resumeTokenDrop},
+ expected: [{operationType: "invalidate"}]
+});
+
+// Test resuming the change stream from the 'invalidate' notification using 'startAfter'.
+cursor = cst.startWatchingChanges({
+ collection: coll,
+ pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
+ aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: cursor,
+ dropType: "drop",
+ expectedNext: {
+ operationType: "insert",
+ ns: {db: db.getName(), coll: coll.getName()},
+ fullDocument: {_id: "after recreate"},
+ documentKey: {_id: "after recreate"}
+ },
+});
+
+// Test that renaming a collection being watched generates a "rename" entry followed by an
+// "invalidate". This holds whether the change stream is on the source or the target
+// collection of the rename. Sharded collections cannot be renamed.
+if (!FixtureHelpers.isSharded(coll)) {
+ cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
+ assertDropCollection(db, "renamed_coll");
+ assert.writeOK(coll.renameCollection("renamed_coll"));
+ expectedChanges = [
+ {
+ operationType: "rename",
+ ns: {db: db.getName(), coll: collName},
+ to: {db: db.getName(), coll: "renamed_coll"},
+ },
+ {operationType: "invalidate"}
+ ];
+ cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
+
+ coll = db["renamed_coll"];
- // Test resuming the change stream from the invalidate after the drop using 'resumeAfter'.
+ // Repeat the test, this time with a change stream open on the target.
+ cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
+ assert.writeOK(coll.renameCollection(collName));
+ expectedChanges = [
+ {
+ operationType: "rename",
+ ns: {db: db.getName(), coll: "renamed_coll"},
+ to: {db: db.getName(), coll: collName},
+ },
+ {operationType: "invalidate"}
+ ];
+ const changes = cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expectedChanges});
+ const resumeTokenRename = changes[0]._id;
+ const resumeTokenInvalidate = changes[1]._id;
+
+ coll = db[collName];
+ assert.writeOK(coll.insert({_id: "after rename"}));
+
+ // Test resuming the change stream from the collection rename using 'resumeAfter'.
+ assertResumeExpected({
+ coll: coll.getName(),
+ spec: {resumeAfter: resumeTokenRename},
+ expected: [{operationType: "invalidate"}]
+ });
+ // Test resuming the change stream from the invalidate after the rename using 'resumeAfter'.
assert.commandFailedWithCode(db.runCommand({
aggregate: coll.getName(),
pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
@@ -126,154 +203,76 @@
}),
ErrorCodes.InvalidResumeToken);
- // Test resuming the change stream from the collection drop using 'startAfter'.
+ // Test resuming the change stream from the rename using 'startAfter'.
assertResumeExpected({
coll: coll.getName(),
- spec: {startAfter: resumeTokenDrop},
+ spec: {startAfter: resumeTokenRename},
expected: [{operationType: "invalidate"}]
});
- // Test resuming the change stream from the 'invalidate' notification using 'startAfter'.
- cursor = cst.startWatchingChanges({
- collection: coll,
- pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
- aggregateOptions: {collation: {locale: "simple"}, cursor: {batchSize: 0}}
- });
- cst.consumeDropUpTo({
- cursor: cursor,
- dropType: "drop",
- expectedNext: {
- operationType: "insert",
- ns: {db: db.getName(), coll: coll.getName()},
- fullDocument: {_id: "after recreate"},
- documentKey: {_id: "after recreate"}
- },
+ // Test resuming the change stream from the invalidate after the rename using 'startAfter'.
+ expectedChanges = [{
+ operationType: "insert",
+ ns: {db: db.getName(), coll: coll.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ }];
+ assertResumeExpected({
+ coll: coll.getName(),
+ spec: {startAfter: resumeTokenInvalidate},
+ expected: expectedChanges
});
- // Test that renaming a collection being watched generates a "rename" entry followed by an
- // "invalidate". This is true if the change stream is on the source or target collection of the
- // rename. Sharded collections cannot be renamed.
- if (!FixtureHelpers.isSharded(coll)) {
- cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- assertDropCollection(db, "renamed_coll");
- assert.writeOK(coll.renameCollection("renamed_coll"));
- expectedChanges = [
- {
- operationType: "rename",
- ns: {db: db.getName(), coll: collName},
- to: {db: db.getName(), coll: "renamed_coll"},
- },
- {operationType: "invalidate"}
- ];
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
-
- coll = db["renamed_coll"];
-
- // Repeat the test, this time with a change stream open on the target.
- cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection(collName));
- expectedChanges = [
- {
- operationType: "rename",
- ns: {db: db.getName(), coll: "renamed_coll"},
- to: {db: db.getName(), coll: collName},
- },
- {operationType: "invalidate"}
- ];
- const changes =
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expectedChanges});
- const resumeTokenRename = changes[0]._id;
- const resumeTokenInvalidate = changes[1]._id;
-
- coll = db[collName];
- assert.writeOK(coll.insert({_id: "after rename"}));
-
- // Test resuming the change stream from the collection rename using 'resumeAfter'.
- assertResumeExpected({
- coll: coll.getName(),
- spec: {resumeAfter: resumeTokenRename},
- expected: [{operationType: "invalidate"}]
- });
- // Test resuming the change stream from the invalidate after the rename using 'resumeAfter'.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
- cursor: {},
- collation: {locale: "simple"},
- }),
- ErrorCodes.InvalidResumeToken);
-
- // Test resuming the change stream from the rename using 'startAfter'.
- assertResumeExpected({
- coll: coll.getName(),
- spec: {startAfter: resumeTokenRename},
- expected: [{operationType: "invalidate"}]
- });
-
- // Test resuming the change stream from the invalidate after the rename using 'startAfter'.
- expectedChanges = [{
- operationType: "insert",
- ns: {db: db.getName(), coll: coll.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- }];
- assertResumeExpected({
- coll: coll.getName(),
- spec: {startAfter: resumeTokenInvalidate},
- expected: expectedChanges
- });
-
- assertDropAndRecreateCollection(db, "renamed_coll");
- assert.writeOK(db.renamed_coll.insert({_id: 0}));
-
- // Repeat the test again, this time using the 'dropTarget' option with an existing target
- // collection.
- cursor =
- cst.startWatchingChanges({collection: "renamed_coll", pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection("renamed_coll", true /* dropTarget */));
- expectedChanges = [
- {
- operationType: "rename",
- ns: {db: db.getName(), coll: collName},
- to: {db: db.getName(), coll: "renamed_coll"},
- },
- {operationType: "invalidate"}
- ];
- cst.assertNextChangesEqual(
- {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
-
- coll = db["renamed_coll"];
-
- // Test the behavior of a change stream watching the target collection of a $out aggregation
- // stage.
- cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
- coll.aggregate([{$out: collName}]);
- // Note that $out will first create a temp collection, and then rename the temp collection
- // to the target. Do not explicitly check the 'ns' field.
- const rename = cst.getOneChange(cursor);
- assert.eq(rename.operationType, "rename", tojson(rename));
- assert.eq(rename.to, {db: db.getName(), coll: collName}, tojson(rename));
- assert.eq(cst.getOneChange(cursor, true).operationType, "invalidate");
- }
-
- // Test that dropping a database will first drop all of it's collections, invalidating any
- // change streams on those collections.
- cursor = cst.startWatchingChanges({
- collection: coll.getName(),
- pipeline: [{$changeStream: {}}],
- });
- assert.commandWorked(db.dropDatabase());
+ assertDropAndRecreateCollection(db, "renamed_coll");
+ assert.writeOK(db.renamed_coll.insert({_id: 0}));
+ // Repeat the test again, this time using the 'dropTarget' option with an existing target
+ // collection.
+ cursor =
+ cst.startWatchingChanges({collection: "renamed_coll", pipeline: [{$changeStream: {}}]});
+ assert.writeOK(coll.renameCollection("renamed_coll", true /* dropTarget */));
expectedChanges = [
{
- operationType: "drop",
- ns: {db: db.getName(), coll: coll.getName()},
+ operationType: "rename",
+ ns: {db: db.getName(), coll: collName},
+ to: {db: db.getName(), coll: "renamed_coll"},
},
{operationType: "invalidate"}
];
cst.assertNextChangesEqual(
{cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
- cst.cleanUp();
+ coll = db["renamed_coll"];
+
+ // Test the behavior of a change stream watching the target collection of a $out aggregation
+ // stage.
+ cursor = cst.startWatchingChanges({collection: collName, pipeline: [{$changeStream: {}}]});
+ coll.aggregate([{$out: collName}]);
+ // Note that $out will first create a temp collection, and then rename the temp collection
+ // to the target. Do not explicitly check the 'ns' field.
+ const rename = cst.getOneChange(cursor);
+ assert.eq(rename.operationType, "rename", tojson(rename));
+ assert.eq(rename.to, {db: db.getName(), coll: collName}, tojson(rename));
+ assert.eq(cst.getOneChange(cursor, true).operationType, "invalidate");
+}
+
+// Test that dropping a database will first drop all of its collections, invalidating any
+// change streams on those collections.
+cursor = cst.startWatchingChanges({
+ collection: coll.getName(),
+ pipeline: [{$changeStream: {}}],
+});
+assert.commandWorked(db.dropDatabase());
+
+expectedChanges = [
+ {
+ operationType: "drop",
+ ns: {db: db.getName(), coll: coll.getName()},
+ },
+ {operationType: "invalidate"}
+];
+cst.assertNextChangesEqual(
+ {cursor: cursor, expectedChanges: expectedChanges, expectInvalidate: true});
+
+cst.cleanUp();
}());
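The contrast that the test above relies on, namely that 'resumeAfter' rejects an invalidate token while 'startAfter' accepts one, can be sketched in isolation. The names below are illustrative and the snippet is not part of this commit; it assumes an unsharded collection, so the drop produces exactly one "drop" event before the "invalidate".

// Minimal sketch: resuming a change stream across an invalidate.
const resumeDB = db.getSiblingDB("resumeDemo");  // illustrative namespace
resumeDB.docs.drop();
assert.commandWorked(resumeDB.docs.insert({_id: 1}));
const resumeStream = resumeDB.docs.watch();
resumeDB.docs.drop();
assert.soon(() => resumeStream.hasNext());
assert.eq(resumeStream.next().operationType, "drop");
assert.soon(() => resumeStream.hasNext());
const invalidateEvent = resumeStream.next();
assert.eq(invalidateEvent.operationType, "invalidate");
// 'resumeAfter' an invalidate token is rejected with InvalidResumeToken...
assert.throws(() => resumeDB.docs.watch([], {resumeAfter: invalidateEvent._id}));
// ...while 'startAfter' opens a new stream just beyond the invalidation.
const restarted = resumeDB.docs.watch([], {startAfter: invalidateEvent._id});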
diff --git a/jstests/change_streams/no_regex_leak.js b/jstests/change_streams/no_regex_leak.js
index e1e5f5484e1..a05207a22a4 100644
--- a/jstests/change_streams/no_regex_leak.js
+++ b/jstests/change_streams/no_regex_leak.js
@@ -3,59 +3,57 @@
* affect what documents appear in a changestream, in response to SERVER-41164.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js");
- load("jstests/libs/collection_drop_recreate.js");
- function test_no_leak(
- dbNameUnrelated, collNameUnrelated, dbNameProblematic, collNameProblematic) {
- const dbUnrelated = db.getSiblingDB(dbNameUnrelated);
- const cstUnrelated = new ChangeStreamTest(dbUnrelated);
- assertDropAndRecreateCollection(dbUnrelated, collNameUnrelated);
+load("jstests/libs/change_stream_util.js");
+load("jstests/libs/collection_drop_recreate.js");
+function test_no_leak(dbNameUnrelated, collNameUnrelated, dbNameProblematic, collNameProblematic) {
+ const dbUnrelated = db.getSiblingDB(dbNameUnrelated);
+ const cstUnrelated = new ChangeStreamTest(dbUnrelated);
+ assertDropAndRecreateCollection(dbUnrelated, collNameUnrelated);
- const watchUnrelated = cstUnrelated.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collNameUnrelated});
+ const watchUnrelated = cstUnrelated.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collNameUnrelated});
- const dbProblematic = db.getSiblingDB(dbNameProblematic);
- const cstProblematic = new ChangeStreamTest(dbProblematic);
- assertDropAndRecreateCollection(dbProblematic, collNameProblematic);
+ const dbProblematic = db.getSiblingDB(dbNameProblematic);
+ const cstProblematic = new ChangeStreamTest(dbProblematic);
+ assertDropAndRecreateCollection(dbProblematic, collNameProblematic);
- const watchProblematic = cstProblematic.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: collNameProblematic});
+ const watchProblematic = cstProblematic.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: collNameProblematic});
- assert.commandWorked(dbUnrelated.getCollection(collNameUnrelated).insert({_id: 2}));
- let expected = {
- documentKey: {_id: 2},
- fullDocument: {_id: 2},
- ns: {db: dbNameUnrelated, coll: collNameUnrelated},
- operationType: "insert",
- };
- // Make sure that only the database which was inserted into reflects a change on its
- // changestream.
- cstUnrelated.assertNextChangesEqual({cursor: watchUnrelated, expectedChanges: [expected]});
- // The other DB shouldn't have any changes.
- cstProblematic.assertNoChange(watchProblematic);
+ assert.commandWorked(dbUnrelated.getCollection(collNameUnrelated).insert({_id: 2}));
+ let expected = {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2},
+ ns: {db: dbNameUnrelated, coll: collNameUnrelated},
+ operationType: "insert",
+ };
+    // Make sure that only the database that received the insert reflects a change on its
+    // change stream.
+ cstUnrelated.assertNextChangesEqual({cursor: watchUnrelated, expectedChanges: [expected]});
+ // The other DB shouldn't have any changes.
+ cstProblematic.assertNoChange(watchProblematic);
- assert.commandWorked(dbProblematic.getCollection(collNameProblematic).insert({_id: 3}));
- expected = {
- documentKey: {_id: 3},
- fullDocument: {_id: 3},
- ns: {db: dbNameProblematic, coll: collNameProblematic},
- operationType: "insert",
- };
- cstProblematic.assertNextChangesEqual(
- {cursor: watchProblematic, expectedChanges: [expected]});
- cstUnrelated.assertNoChange(watchUnrelated);
+ assert.commandWorked(dbProblematic.getCollection(collNameProblematic).insert({_id: 3}));
+ expected = {
+ documentKey: {_id: 3},
+ fullDocument: {_id: 3},
+ ns: {db: dbNameProblematic, coll: collNameProblematic},
+ operationType: "insert",
+ };
+ cstProblematic.assertNextChangesEqual({cursor: watchProblematic, expectedChanges: [expected]});
+ cstUnrelated.assertNoChange(watchUnrelated);
- cstUnrelated.cleanUp();
- cstProblematic.cleanUp();
- }
- if (!_isWindows()) {
- test_no_leak("has_no_pipe", "coll", "has_a_|pipe", "coll");
- test_no_leak("starssss", "coll", "stars*", "coll");
- }
- test_no_leak("has_[two]_brackets", "coll", "has_t_brackets", "coll");
- test_no_leak("test", "dotted.collection", "testadotted", "collection");
- test_no_leak("carat", "coll", "hasa^carat", "coll");
- test_no_leak("db1", "coll", "db1", "col*");
+ cstUnrelated.cleanUp();
+ cstProblematic.cleanUp();
+}
+if (!_isWindows()) {
+ test_no_leak("has_no_pipe", "coll", "has_a_|pipe", "coll");
+ test_no_leak("starssss", "coll", "stars*", "coll");
+}
+test_no_leak("has_[two]_brackets", "coll", "has_t_brackets", "coll");
+test_no_leak("test", "dotted.collection", "testadotted", "collection");
+test_no_leak("carat", "coll", "hasa^carat", "coll");
+test_no_leak("db1", "coll", "db1", "col*");
}());
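The failure mode guarded against above is a namespace string being treated as a regular expression when the server filters the oplog. Below is a standalone sketch of the check, with illustrative names, not part of this commit; the '*' database name assumes a non-Windows host, matching the test's own platform guard.

// Minimal sketch: a metacharacter in a watched namespace must match literally.
const specialDB = db.getSiblingDB("stars*");   // would match "starssss" as a regex
const plainDB = db.getSiblingDB("starssss");
specialDB.coll.drop();
plainDB.coll.drop();
const specialStream = specialDB.coll.watch();
assert.commandWorked(plainDB.coll.insert({_id: 1}));
// If "stars*" leaked into the oplog filter as a regex, the insert into
// "starssss.coll" would surface here; the stream must remain empty.
assert(!specialStream.hasNext());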
diff --git a/jstests/change_streams/only_wake_getmore_for_relevant_changes.js b/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
index 19f5433c8e9..16400360d55 100644
--- a/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
+++ b/jstests/change_streams/only_wake_getmore_for_relevant_changes.js
@@ -4,33 +4,33 @@
// ]
// return early.
(function() {
- "use strict";
-
- load('jstests/libs/uuid_util.js');
- load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- /**
- * Uses a parallel shell to execute the javascript function 'event' at the same time as an
- * awaitData getMore on the cursor with id 'awaitDataCursorId'. Returns the result of the
- * getMore, and the time it took to complete.
- *
- * Note that 'event' will not have access to any local variables, since it will be executed in a
- * different scope.
- */
- function runGetMoreInParallelWithEvent(
- {collection, awaitDataCursorId, identifyingComment, maxTimeMS, event}) {
- // In some extreme cases, the parallel shell can take longer to start up than it takes for
- // the getMore to run. To prevent this from happening, the main thread waits for an insert
- // into "sentinel", to signal that the parallel shell has started and is waiting for the
- // getMore to appear in currentOp.
- const port =
- (collection.stats().sharded ? collection.getMongo().port
- : FixtureHelpers.getPrimaryForNodeHostingDatabase(db).port);
-
- const sentinelCountBefore = shellSentinelCollection.find().itcount();
-
- const awaitShellDoingEventDuringGetMore = startParallelShell(`
+"use strict";
+
+load('jstests/libs/uuid_util.js');
+load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+/**
+ * Uses a parallel shell to execute the javascript function 'event' at the same time as an
+ * awaitData getMore on the cursor with id 'awaitDataCursorId'. Returns the result of the
+ * getMore, and the time it took to complete.
+ *
+ * Note that 'event' will not have access to any local variables, since it will be executed in a
+ * different scope.
+ */
+function runGetMoreInParallelWithEvent(
+ {collection, awaitDataCursorId, identifyingComment, maxTimeMS, event}) {
+ // In some extreme cases, the parallel shell can take longer to start up than it takes for
+ // the getMore to run. To prevent this from happening, the main thread waits for an insert
+ // into "sentinel", to signal that the parallel shell has started and is waiting for the
+ // getMore to appear in currentOp.
+ const port =
+ (collection.stats().sharded ? collection.getMongo().port
+ : FixtureHelpers.getPrimaryForNodeHostingDatabase(db).port);
+
+ const sentinelCountBefore = shellSentinelCollection.find().itcount();
+
+ const awaitShellDoingEventDuringGetMore = startParallelShell(`
// Signal that the parallel shell has started.
assert.writeOK(db.getCollection("${ shellSentinelCollection.getName() }").insert({}));
@@ -46,134 +46,133 @@ const eventFn = ${ event.toString() };
eventFn();`,
port);
- // Wait for the shell to start.
- assert.soon(() => shellSentinelCollection.find().itcount() > sentinelCountBefore);
-
- // Run and time the getMore.
- const startTime = (new Date()).getTime();
- const result = assert.commandWorked(db.runCommand(
- {getMore: awaitDataCursorId, collection: collection.getName(), maxTimeMS: maxTimeMS}));
- awaitShellDoingEventDuringGetMore();
- return {result: result, elapsedMs: (new Date()).getTime() - startTime};
- }
-
- /**
- * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will not return after
- * 'event' is called, and will instead keep waiting until its maxTimeMS is expired.
- *
- * @param [Collection] collection - the collection to use in the getMore command.
- * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
- * @param [Function] event - the event that should be run during the getMore.
- */
- function assertEventDoesNotWakeCursor(
- {collection, awaitDataCursorId, identifyingComment, event}) {
- const {result, elapsedMs} = runGetMoreInParallelWithEvent({
- collection: collection,
- awaitDataCursorId: awaitDataCursorId,
- identifyingComment: identifyingComment,
- maxTimeMS: 1000,
- event: event,
- });
- // Should have waited for at least 'maxTimeMS'.
- assert.gt(elapsedMs, 900, "getMore returned before waiting for maxTimeMS");
- const cursorResponse = result.cursor;
- // Cursor should be valid with no data.
- assert.neq(cursorResponse.id, 0);
- assert.eq(cursorResponse.nextBatch.length, 0);
- }
-
- /**
- * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will return soon after
- * 'event' is called, and returns the response from the getMore command.
- *
- * @param [Collection] collection - the collection to use in the getMore command.
- * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
- * @param [Function] event - the event that should be run during the getMore.
- */
- function assertEventWakesCursor({collection, awaitDataCursorId, identifyingComment, event}) {
- // Run the original event, then (while still in the parallel shell) assert that the getMore
- // finishes soon after. This will be run in a parallel shell, which will not have a variable
- // 'event' in scope, so we'll have to stringify it here.
- const thirtyMinutes = 30 * 60 * 1000;
- const fiveMinutes = 5 * 60 * 1000;
- const {result, elapsedMs} = runGetMoreInParallelWithEvent({
- collection: collection,
- awaitDataCursorId: awaitDataCursorId,
- identifyingComment: identifyingComment,
- maxTimeMS: thirtyMinutes,
- event: event,
- });
-
- assert.lt(elapsedMs, fiveMinutes);
-
- return result;
- }
-
- // Refresh all collections which will be required in the course of this test.
- const shellSentinelCollection = assertDropAndRecreateCollection(db, "shell_sentinel");
- const changesCollection = assertDropAndRecreateCollection(db, "changes");
- const unrelatedCollection = assertDropCollection(db, "unrelated_collection");
-
- // Start a change stream cursor.
- const wholeCollectionStreamComment = "change stream on entire collection";
- let res = assert.commandWorked(db.runCommand({
- aggregate: changesCollection.getName(),
- // Project out the resume token, since that's subject to change unpredictably.
- pipeline: [{$changeStream: {}}],
- cursor: {},
- comment: wholeCollectionStreamComment
- }));
- const changeCursorId = res.cursor.id;
- assert.neq(changeCursorId, 0);
- assert.eq(res.cursor.firstBatch.length, 0);
-
- // Test that an insert during a getMore will wake up the cursor and immediately return with the
- // new result.
- const getMoreResponse = assertEventWakesCursor({
- collection: changesCollection,
- awaitDataCursorId: changeCursorId,
- identifyingComment: wholeCollectionStreamComment,
- event: () => assert.writeOK(db.changes.insert({_id: "wake up"}))
+ // Wait for the shell to start.
+ assert.soon(() => shellSentinelCollection.find().itcount() > sentinelCountBefore);
+
+ // Run and time the getMore.
+ const startTime = (new Date()).getTime();
+ const result = assert.commandWorked(db.runCommand(
+ {getMore: awaitDataCursorId, collection: collection.getName(), maxTimeMS: maxTimeMS}));
+ awaitShellDoingEventDuringGetMore();
+ return {result: result, elapsedMs: (new Date()).getTime() - startTime};
+}
+
+/**
+ * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will not return after
+ * 'event' is called, and will instead keep waiting until its maxTimeMS is expired.
+ *
+ * @param [Collection] collection - the collection to use in the getMore command.
+ * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
+ * @param [Function] event - the event that should be run during the getMore.
+ */
+function assertEventDoesNotWakeCursor({collection, awaitDataCursorId, identifyingComment, event}) {
+ const {result, elapsedMs} = runGetMoreInParallelWithEvent({
+ collection: collection,
+ awaitDataCursorId: awaitDataCursorId,
+ identifyingComment: identifyingComment,
+ maxTimeMS: 1000,
+ event: event,
});
- assert.eq(getMoreResponse.cursor.nextBatch.length, 1);
- assert.eq(getMoreResponse.cursor.nextBatch[0].operationType,
- "insert",
- tojson(getMoreResponse.cursor.nextBatch[0]));
- assert.eq(getMoreResponse.cursor.nextBatch[0].fullDocument,
- {_id: "wake up"},
- tojson(getMoreResponse.cursor.nextBatch[0]));
-
- // Test that an insert to an unrelated collection will not cause the change stream to wake up
- // and return an empty batch before reaching the maxTimeMS.
- assertEventDoesNotWakeCursor({
- collection: changesCollection,
- awaitDataCursorId: changeCursorId,
- identifyingComment: wholeCollectionStreamComment,
- event: () => assert.writeOK(db.unrelated_collection.insert({_id: "unrelated change"}))
+ // Should have waited for at least 'maxTimeMS'.
+ assert.gt(elapsedMs, 900, "getMore returned before waiting for maxTimeMS");
+ const cursorResponse = result.cursor;
+ // Cursor should be valid with no data.
+ assert.neq(cursorResponse.id, 0);
+ assert.eq(cursorResponse.nextBatch.length, 0);
+}
+
+/**
+ * Asserts that a getMore of the cursor given by 'awaitDataCursorId' will return soon after
+ * 'event' is called, and returns the response from the getMore command.
+ *
+ * @param [Collection] collection - the collection to use in the getMore command.
+ * @param [NumberLong] awaitDataCursorId - the id of the cursor to use in the getMore command.
+ * @param [Function] event - the event that should be run during the getMore.
+ */
+function assertEventWakesCursor({collection, awaitDataCursorId, identifyingComment, event}) {
+ // Run the original event, then (while still in the parallel shell) assert that the getMore
+ // finishes soon after. This will be run in a parallel shell, which will not have a variable
+ // 'event' in scope, so we'll have to stringify it here.
+ const thirtyMinutes = 30 * 60 * 1000;
+ const fiveMinutes = 5 * 60 * 1000;
+ const {result, elapsedMs} = runGetMoreInParallelWithEvent({
+ collection: collection,
+ awaitDataCursorId: awaitDataCursorId,
+ identifyingComment: identifyingComment,
+ maxTimeMS: thirtyMinutes,
+ event: event,
});
- assert.commandWorked(
- db.runCommand({killCursors: changesCollection.getName(), cursors: [changeCursorId]}));
-
- // Test that changes ignored by filtering in later stages of the pipeline will not cause the
- // cursor to return before the getMore has exceeded maxTimeMS.
- const noInvalidatesComment = "change stream filtering invalidate entries";
- res = assert.commandWorked(db.runCommand({
- aggregate: changesCollection.getName(),
- // This pipeline filters changes to only invalidates, so regular inserts should not cause
- // the awaitData to end early.
- pipeline: [{$changeStream: {}}, {$match: {operationType: "invalidate"}}],
- cursor: {},
- comment: noInvalidatesComment
- }));
- assert.eq(
- res.cursor.firstBatch.length, 0, "did not expect any invalidations on changes collection");
- assert.neq(res.cursor.id, 0);
- assertEventDoesNotWakeCursor({
- collection: changesCollection,
- awaitDataCursorId: res.cursor.id,
- identifyingComment: noInvalidatesComment,
- event: () => assert.writeOK(db.changes.insert({_id: "should not appear"}))
- });
- assert.commandWorked(
- db.runCommand({killCursors: changesCollection.getName(), cursors: [res.cursor.id]}));
+
+ assert.lt(elapsedMs, fiveMinutes);
+
+ return result;
+}
+
+// Refresh all collections which will be required in the course of this test.
+const shellSentinelCollection = assertDropAndRecreateCollection(db, "shell_sentinel");
+const changesCollection = assertDropAndRecreateCollection(db, "changes");
+const unrelatedCollection = assertDropCollection(db, "unrelated_collection");
+
+// Start a change stream cursor.
+const wholeCollectionStreamComment = "change stream on entire collection";
+let res = assert.commandWorked(db.runCommand({
+ aggregate: changesCollection.getName(),
+ // Project out the resume token, since that's subject to change unpredictably.
+ pipeline: [{$changeStream: {}}],
+ cursor: {},
+ comment: wholeCollectionStreamComment
+}));
+const changeCursorId = res.cursor.id;
+assert.neq(changeCursorId, 0);
+assert.eq(res.cursor.firstBatch.length, 0);
+
+// Test that an insert during a getMore will wake up the cursor and immediately return with the
+// new result.
+const getMoreResponse = assertEventWakesCursor({
+ collection: changesCollection,
+ awaitDataCursorId: changeCursorId,
+ identifyingComment: wholeCollectionStreamComment,
+ event: () => assert.writeOK(db.changes.insert({_id: "wake up"}))
+});
+assert.eq(getMoreResponse.cursor.nextBatch.length, 1);
+assert.eq(getMoreResponse.cursor.nextBatch[0].operationType,
+ "insert",
+ tojson(getMoreResponse.cursor.nextBatch[0]));
+assert.eq(getMoreResponse.cursor.nextBatch[0].fullDocument,
+ {_id: "wake up"},
+ tojson(getMoreResponse.cursor.nextBatch[0]));
+
+// Test that an insert to an unrelated collection will not cause the change stream to wake up
+// and return an empty batch before reaching the maxTimeMS.
+assertEventDoesNotWakeCursor({
+ collection: changesCollection,
+ awaitDataCursorId: changeCursorId,
+ identifyingComment: wholeCollectionStreamComment,
+ event: () => assert.writeOK(db.unrelated_collection.insert({_id: "unrelated change"}))
+});
+assert.commandWorked(
+ db.runCommand({killCursors: changesCollection.getName(), cursors: [changeCursorId]}));
+
+// Test that changes ignored by filtering in later stages of the pipeline will not cause the
+// cursor to return before the getMore has exceeded maxTimeMS.
+const noInvalidatesComment = "change stream filtering invalidate entries";
+res = assert.commandWorked(db.runCommand({
+ aggregate: changesCollection.getName(),
+ // This pipeline filters changes to only invalidates, so regular inserts should not cause
+ // the awaitData to end early.
+ pipeline: [{$changeStream: {}}, {$match: {operationType: "invalidate"}}],
+ cursor: {},
+ comment: noInvalidatesComment
+}));
+assert.eq(
+ res.cursor.firstBatch.length, 0, "did not expect any invalidations on changes collection");
+assert.neq(res.cursor.id, 0);
+assertEventDoesNotWakeCursor({
+ collection: changesCollection,
+ awaitDataCursorId: res.cursor.id,
+ identifyingComment: noInvalidatesComment,
+ event: () => assert.writeOK(db.changes.insert({_id: "should not appear"}))
+});
+assert.commandWorked(
+ db.runCommand({killCursors: changesCollection.getName(), cursors: [res.cursor.id]}));
}());
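The timing contract asserted above, that an awaitData getMore blocks for up to maxTimeMS and then returns an empty batch on a still-open cursor, can be seen with a bare command sequence. The sketch below uses illustrative names and is not part of this commit.

// Minimal sketch: a quiet change stream waits out maxTimeMS on getMore.
const wakeDB = db.getSiblingDB("wakeDemo");  // illustrative namespace
wakeDB.events.drop();
assert.commandWorked(wakeDB.createCollection("events"));
let wakeRes = assert.commandWorked(wakeDB.runCommand(
    {aggregate: "events", pipeline: [{$changeStream: {}}], cursor: {}}));
const waitStart = Date.now();
wakeRes = assert.commandWorked(wakeDB.runCommand(
    {getMore: wakeRes.cursor.id, collection: "events", maxTimeMS: 500}));
assert.gte(Date.now() - waitStart, 400, "getMore returned before maxTimeMS expired");
assert.neq(wakeRes.cursor.id, 0);                // the cursor survives the timeout
assert.eq(wakeRes.cursor.nextBatch.length, 0);   // but carries no events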
diff --git a/jstests/change_streams/pipeline_cannot_modify_id_field.js b/jstests/change_streams/pipeline_cannot_modify_id_field.js
index d43e1ff28a4..20909ab4f9a 100644
--- a/jstests/change_streams/pipeline_cannot_modify_id_field.js
+++ b/jstests/change_streams/pipeline_cannot_modify_id_field.js
@@ -3,144 +3,142 @@
* $changeStream pipeline.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, jsTestName());
+const coll = assertDropAndRecreateCollection(db, jsTestName());
- // Bare-bones $changeStream pipeline which will be augmented during tests.
- const changeStream = [{$changeStream: {}}];
+// Bare-bones $changeStream pipeline which will be augmented during tests.
+const changeStream = [{$changeStream: {}}];
- // Test-cases of transformations that modify or remove _id, and are thus disallowed.
- const idModifyingTransformations = [
- {$project: {_id: 0}},
- {$project: {_id: "newValue"}},
- {$project: {_id: "$otherField"}},
- {$project: {_id: 0, otherField: 0}},
- {$project: {_id: 0, otherField: 1}},
- {$project: {"_id._data": 0}},
- {$project: {"_id._data": 1}},
- {$project: {"_id._data": "newValue"}},
- {$project: {"_id._data": "$_id._data"}}, // Disallowed because it discards _typeBits.
- {$project: {"_id._data": "$otherField"}},
- {$project: {"_id.otherField": 1}},
- {$project: {"_id._typeBits": 0}},
- [
- {$project: {otherField: "$_id"}},
- {$project: {otherField: 0}},
- {$project: {_id: "$otherField"}}
- ],
- {$project: {_id: {data: "$_id._data", typeBits: "$_id._typeBits"}}}, // Fields renamed.
- {$project: {_id: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}}, // Fields reordered.
- {$project: {_id: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}}, // Fields swapped.
- {$set: {_id: "newValue"}},
- {$set: {_id: "$otherField"}},
- {$set: {"_id._data": "newValue"}},
- {$set: {"_id._data": "$otherField"}},
- {$set: {"_id.otherField": "newValue"}}, // New subfield added to _id.
- [
- {$addFields: {otherField: "$_id"}},
- {$set: {otherField: "newValue"}},
- {$set: {_id: "$otherField"}}
- ],
- [
- // Fields renamed.
- {$addFields: {newId: {data: "$_id._data", typeBits: "$_id._typeBits"}}},
- {$set: {_id: "$newId"}}
- ],
- [
- // Fields reordered.
- {$addFields: {newId: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}},
- {$set: {_id: "$newId"}}
- ],
- [
- // Fields swapped.
- {$addFields: {newId: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}},
- {$set: {_id: "$newId"}}
- ],
- {$replaceRoot: {newRoot: {otherField: "$_id"}}},
- {$replaceWith: {otherField: "$_id"}},
- {$redact: {$cond: {if: {$gt: ["$_id", {}]}, then: "$$DESCEND", else: "$$PRUNE"}}} // _id:0
- ];
-
- // Test-cases of projections which are allowed: explicit inclusion of _id, implicit inclusion of
- // _id, renames which retain the full _id field, exclusion of unrelated fields, addition of and
- // modifications to unrelated fields, sequential renames which ultimately preserve _id, etc.
- const idPreservingTransformations = [
- {$project: {_id: 1}},
- {$project: {_id: 1, otherField: 0}},
- {$project: {_id: 1, otherField: 1}},
- {$project: {_id: "$_id", otherField: 1}},
- {$project: {"_id.otherField": 0}},
- {$project: {otherField: 1}},
+// Test-cases of transformations that modify or remove _id, and are thus disallowed.
+const idModifyingTransformations = [
+ {$project: {_id: 0}},
+ {$project: {_id: "newValue"}},
+ {$project: {_id: "$otherField"}},
+ {$project: {_id: 0, otherField: 0}},
+ {$project: {_id: 0, otherField: 1}},
+ {$project: {"_id._data": 0}},
+ {$project: {"_id._data": 1}},
+ {$project: {"_id._data": "newValue"}},
+ {$project: {"_id._data": "$_id._data"}}, // Disallowed because it discards _typeBits.
+ {$project: {"_id._data": "$otherField"}},
+ {$project: {"_id.otherField": 1}},
+ {$project: {"_id._typeBits": 0}},
+ [
+ {$project: {otherField: "$_id"}},
{$project: {otherField: 0}},
+ {$project: {_id: "$otherField"}}
+ ],
+ {$project: {_id: {data: "$_id._data", typeBits: "$_id._typeBits"}}}, // Fields renamed.
+ {$project: {_id: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}}, // Fields reordered.
+ {$project: {_id: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}}, // Fields swapped.
+ {$set: {_id: "newValue"}},
+ {$set: {_id: "$otherField"}},
+ {$set: {"_id._data": "newValue"}},
+ {$set: {"_id._data": "$otherField"}},
+ {$set: {"_id.otherField": "newValue"}}, // New subfield added to _id.
+ [
+ {$addFields: {otherField: "$_id"}},
+ {$set: {otherField: "newValue"}},
+ {$set: {_id: "$otherField"}}
+ ],
+ [
+ // Fields renamed.
+ {$addFields: {newId: {data: "$_id._data", typeBits: "$_id._typeBits"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ [
+ // Fields reordered.
+ {$addFields: {newId: {_typeBits: "$_id._typeBits", _data: "$_id._data"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ [
+ // Fields swapped.
+ {$addFields: {newId: {_data: "$_id._typeBits", _typeBits: "$_id._data"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ {$replaceRoot: {newRoot: {otherField: "$_id"}}},
+ {$replaceWith: {otherField: "$_id"}},
+ {$redact: {$cond: {if: {$gt: ["$_id", {}]}, then: "$$DESCEND", else: "$$PRUNE"}}} // _id:0
+];
+
+// Test-cases of projections which are allowed: explicit inclusion of _id, implicit inclusion of
+// _id, renames which retain the full _id field, exclusion of unrelated fields, addition of and
+// modifications to unrelated fields, sequential renames which ultimately preserve _id, etc.
+const idPreservingTransformations = [
+ {$project: {_id: 1}},
+ {$project: {_id: 1, otherField: 0}},
+ {$project: {_id: 1, otherField: 1}},
+ {$project: {_id: "$_id", otherField: 1}},
+ {$project: {"_id.otherField": 0}},
+ {$project: {otherField: 1}},
+ {$project: {otherField: 0}},
+ {$project: {otherField: "$_id"}},
+ [
{$project: {otherField: "$_id"}},
- [
- {$project: {otherField: "$_id"}},
- {$project: {otherField: 1}},
- {$project: {_id: "$otherField"}}
- ],
- {$project: {"_id._data": 1, "_id._typeBits": 1}},
- {$project: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
- {$set: {_id: "$_id"}},
- {$addFields: {otherField: "newValue"}},
- {$set: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
- [{$addFields: {otherField: "$_id"}}, {$set: {_id: "$otherField"}}],
- [
- {$addFields: {newId: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
- {$set: {_id: "$newId"}}
- ],
- {$replaceRoot: {newRoot: {_id: "$_id"}}},
- {$replaceWith: {_id: "$_id"}},
- {
- $redact: {
- $cond: {
- if: {
- $or: [
- // Keeps _id, descends into fullDocument.
- {$not: {$isArray: "$tags"}},
- {$gt: [{$size: {$setIntersection: ["$tags", ["USA"]]}}, 0]}
- ]
- },
- then: "$$DESCEND",
- else: "$$PRUNE"
- }
- }
- },
- {$redact: "$$DESCEND"}, // Descends through entire document, retaining all of it.
- {$redact: "$$KEEP"} // Keeps entire document.
- ];
+ {$project: {otherField: 1}},
+ {$project: {_id: "$otherField"}}
+ ],
+ {$project: {"_id._data": 1, "_id._typeBits": 1}},
+ {$project: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
+ {$set: {_id: "$_id"}},
+ {$addFields: {otherField: "newValue"}},
+ {$set: {_id: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
+ [{$addFields: {otherField: "$_id"}}, {$set: {_id: "$otherField"}}],
+ [
+ {$addFields: {newId: {_data: "$_id._data", _typeBits: "$_id._typeBits"}}},
+ {$set: {_id: "$newId"}}
+ ],
+ {$replaceRoot: {newRoot: {_id: "$_id"}}},
+ {$replaceWith: {_id: "$_id"}},
+ {
+ $redact: {
+ $cond: {
+ if: {
+ $or: [
+ // Keeps _id, descends into fullDocument.
+ {$not: {$isArray: "$tags"}},
+ {$gt: [{$size: {$setIntersection: ["$tags", ["USA"]]}}, 0]}
+ ]
+ },
+ then: "$$DESCEND",
+ else: "$$PRUNE"
+ }
+ }
+ },
+ {$redact: "$$DESCEND"}, // Descends through entire document, retaining all of it.
+ {$redact: "$$KEEP"} // Keeps entire document.
+];
- let docId = 0;
+let docId = 0;
- // Verify that each of the whitelisted transformations above succeeds.
- for (let transform of idPreservingTransformations) {
- const cmdRes = assert.commandWorked(
- db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
- transform);
- assert.commandWorked(coll.insert({_id: docId++}));
- assert.soon(() => {
- const getMoreRes = assert.commandWorked(
- db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()}), transform);
- return getMoreRes.cursor.nextBatch.length > 0;
- }, transform);
- }
+// Verify that each of the whitelisted transformations above succeeds.
+for (let transform of idPreservingTransformations) {
+ const cmdRes = assert.commandWorked(
+ db.runCommand(
+ {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
+ transform);
+ assert.commandWorked(coll.insert({_id: docId++}));
+ assert.soon(() => {
+ const getMoreRes = assert.commandWorked(
+ db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()}), transform);
+ return getMoreRes.cursor.nextBatch.length > 0;
+ }, transform);
+}
- // Verify that each of the blacklisted transformations above are rejected.
- for (let transform of idModifyingTransformations) {
- const cmdRes = assert.commandWorked(
- db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
- transform);
- assert.commandWorked(coll.insert({_id: docId++}));
- assert.soon(() => {
- const getMoreRes =
- db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
- return !getMoreRes.ok &&
- assert.commandFailedWithCode(
- getMoreRes, ErrorCodes.ChangeStreamFatalError, transform);
- }, transform);
- }
+// Verify that each of the blacklisted transformations above is rejected.
+for (let transform of idModifyingTransformations) {
+ const cmdRes = assert.commandWorked(
+ db.runCommand(
+ {aggregate: coll.getName(), pipeline: changeStream.concat(transform), cursor: {}}),
+ transform);
+ assert.commandWorked(coll.insert({_id: docId++}));
+ assert.soon(() => {
+ const getMoreRes = db.runCommand({getMore: cmdRes.cursor.id, collection: coll.getName()});
+ return !getMoreRes.ok &&
+ assert.commandFailedWithCode(getMoreRes, ErrorCodes.ChangeStreamFatalError, transform);
+ }, transform);
+}
}());
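
The invariant that the whitelist and blacklist above exercise is simple: any stage appended after $changeStream must leave each event's _id (its resume token) intact, because the server relies on that field to resume the stream. A minimal sketch of both outcomes, assuming a replica-set fixture like the ones these suites run under; the collection name "demo" is hypothetical:

(function() {
"use strict";

const pipelinePreservingId = [{$changeStream: {}}, {$replaceRoot: {newRoot: {_id: "$_id"}}}];
const pipelineDroppingId = [{$changeStream: {}}, {$project: {_id: 0}}];

// Both pipelines are accepted at parse time.
const okRes = assert.commandWorked(
    db.runCommand({aggregate: "demo", pipeline: pipelinePreservingId, cursor: {}}));
const badRes = assert.commandWorked(
    db.runCommand({aggregate: "demo", pipeline: pipelineDroppingId, cursor: {}}));

assert.commandWorked(db.demo.insert({_id: 1}));

// The _id-preserving cursor eventually returns the insert event...
assert.soon(() => {
    const res = assert.commandWorked(
        db.runCommand({getMore: okRes.cursor.id, collection: "demo"}));
    return res.cursor.nextBatch.length > 0;
});

// ... while the _id-dropping cursor fails once an event flows through it.
assert.soon(() => {
    const res = db.runCommand({getMore: badRes.cursor.id, collection: "demo"});
    return !res.ok && res.code === ErrorCodes.ChangeStreamFatalError;
});
}());
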
diff --git a/jstests/change_streams/report_post_batch_resume_token.js b/jstests/change_streams/report_post_batch_resume_token.js
index 1055288a9f5..1e9a110c99f 100644
--- a/jstests/change_streams/report_post_batch_resume_token.js
+++ b/jstests/change_streams/report_post_batch_resume_token.js
@@ -4,188 +4,188 @@
* @tags: [uses_transactions]
*/
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
-    // Drop and recreate collections to ensure a clean run.
- const collName = "report_post_batch_resume_token";
- const testCollection = assertDropAndRecreateCollection(db, collName);
- const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
- const adminDB = db.getSiblingDB("admin");
-
- let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
- const batchSize = 2;
-
- // Test that postBatchResumeToken is present on an initial aggregate of batchSize: 0.
- let csCursor = testCollection.watch([], {cursor: {batchSize: 0}});
- assert.eq(csCursor.objsLeftInBatch(), 0);
- let initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Test that the PBRT does not advance beyond its initial value for a change stream whose
- // startAtOperationTime is in the future, even as writes are made to the test collection.
- const timestampIn2100 = Timestamp(4102444800, 1);
- csCursor = testCollection.watch([], {startAtOperationTime: timestampIn2100});
- assert.eq(csCursor.objsLeftInBatch(), 0);
- initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Write some documents to the test collection.
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
-
- // Verify that no events are returned and the PBRT does not advance or go backwards.
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- let getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(initialAggPBRT, getMorePBRT), 0);
-
- // Test that postBatchResumeToken is present on empty initial aggregate batch.
- csCursor = testCollection.watch();
- assert.eq(csCursor.objsLeftInBatch(), 0);
- initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
-
- // Test that postBatchResumeToken is present on empty getMore batch.
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- getMorePBRT = csCursor.getResumeToken();
- assert.neq(undefined, getMorePBRT);
- assert.gte(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
-
- // Test that postBatchResumeToken advances with returned events. Insert one document into the
- // collection and consume the resulting change stream event.
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+// Drop and recreate collections to ensure a clean run.
+const collName = "report_post_batch_resume_token";
+const testCollection = assertDropAndRecreateCollection(db, collName);
+const otherCollection = assertDropAndRecreateCollection(db, "unrelated_" + collName);
+const adminDB = db.getSiblingDB("admin");
+
+let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
+const batchSize = 2;
+
+// Test that postBatchResumeToken is present on an initial aggregate of batchSize: 0.
+let csCursor = testCollection.watch([], {cursor: {batchSize: 0}});
+assert.eq(csCursor.objsLeftInBatch(), 0);
+let initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Test that the PBRT does not advance beyond its initial value for a change stream whose
+// startAtOperationTime is in the future, even as writes are made to the test collection.
+const timestampIn2100 = Timestamp(4102444800, 1);
+csCursor = testCollection.watch([], {startAtOperationTime: timestampIn2100});
+assert.eq(csCursor.objsLeftInBatch(), 0);
+initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Write some documents to the test collection.
+for (let i = 0; i < 5; ++i) {
assert.commandWorked(testCollection.insert({_id: docId++}));
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
-    assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // Because the retrieved event is the most recent entry in the oplog, the PBRT should be equal
- // to the resume token of the last item in the batch and greater than the initial PBRT.
- let resumeTokenFromDoc = csCursor.next()._id;
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, resumeTokenFromDoc), 0);
- assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
-
- // Now seed the collection with enough documents to fit in two batches.
- for (let i = 0; i < batchSize * 2; i++) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
-
-    // Test that the PBRT for a resumed stream is the given resume token if no results are returned.
- csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc, cursor: {batchSize: 0}});
- assert.eq(csCursor.objsLeftInBatch(), 0);
- initialAggPBRT = csCursor.getResumeToken();
- assert.neq(undefined, initialAggPBRT);
- assert.eq(bsonWoCompare(initialAggPBRT, resumeTokenFromDoc), 0);
-
- // Test that postBatchResumeToken advances with getMore. Iterate the cursor and assert that the
- // observed postBatchResumeToken advanced.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
-
- // The postBatchResumeToken is again equal to the final token in the batch, and greater than the
- // PBRT from the initial response.
- let eventFromCursor = null;
- while (csCursor.objsLeftInBatch()) {
- eventFromCursor = csCursor.next();
- resumeTokenFromDoc = eventFromCursor._id;
- }
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(resumeTokenFromDoc, getMorePBRT), 0);
- assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
-
- // Test that postBatchResumeToken advances with writes to an unrelated collection. First make
- // sure there is nothing left in our cursor, and obtain the latest PBRT...
- while (eventFromCursor.fullDocument._id < (docId - 1)) {
- assert.soon(() => csCursor.hasNext());
- eventFromCursor = csCursor.next();
- }
- assert(!csCursor.hasNext());
- let previousGetMorePBRT = csCursor.getResumeToken();
- assert.neq(undefined, previousGetMorePBRT);
-
- // ... then test that it advances on an insert to an unrelated collection.
- assert.commandWorked(otherCollection.insert({}));
- assert.soon(() => {
- assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
- getMorePBRT = csCursor.getResumeToken();
- return bsonWoCompare(getMorePBRT, previousGetMorePBRT) > 0;
- });
-
- // Insert two documents into the collection which are of the maximum BSON object size.
- const bsonUserSizeLimit = assert.commandWorked(adminDB.isMaster()).maxBsonObjectSize;
- assert.gt(bsonUserSizeLimit, 0);
- for (let i = 0; i < 2; ++i) {
- const docToInsert = {_id: docId++, padding: ""};
- docToInsert.padding = "a".repeat(bsonUserSizeLimit - Object.bsonsize(docToInsert));
- assert.commandWorked(testCollection.insert(docToInsert));
- }
-
- // Test that we return the correct postBatchResumeToken in the event that the batch hits the
- // byte size limit. Despite the fact that the batchSize is 2, we should only see 1 result,
- // because the second result cannot fit in the batch.
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // Obtain the resume token and the PBRT from the first document.
- resumeTokenFromDoc = csCursor.next()._id;
- getMorePBRT = csCursor.getResumeToken();
-
- // Now retrieve the second event and confirm that the PBRTs and resume tokens are in-order.
- previousGetMorePBRT = getMorePBRT;
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
- const resumeTokenFromSecondDoc = csCursor.next()._id;
- getMorePBRT = csCursor.getResumeToken();
- assert.gte(bsonWoCompare(previousGetMorePBRT, resumeTokenFromDoc), 0);
- assert.gt(bsonWoCompare(resumeTokenFromSecondDoc, previousGetMorePBRT), 0);
- assert.gte(bsonWoCompare(getMorePBRT, resumeTokenFromSecondDoc), 0);
-
- // Test that the PBRT is correctly updated when reading events from within a transaction.
- csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
- const session = db.getMongo().startSession();
- const sessionDB = session.getDatabase(db.getName());
-
- const sessionColl = sessionDB[testCollection.getName()];
- const sessionOtherColl = sessionDB[otherCollection.getName()];
- session.startTransaction();
-
- // Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
- for (let i = 0; i < 3; ++i) {
- assert.commandWorked(sessionColl.insert({_id: docId++}));
- }
- assert.commandWorked(sessionOtherColl.insert({}));
- assert.commandWorked(session.commitTransaction_forTesting());
- session.endSession();
-
- // Grab the next 2 events, which should be the first 2 events in the transaction.
- previousGetMorePBRT = getMorePBRT;
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 2);
-
- // The clusterTime should be the same on each, but the resume token keeps advancing.
- const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
- const txnClusterTime = txnEvent1.clusterTime;
- assert.eq(txnEvent2.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent1._id, previousGetMorePBRT), 0);
- assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
-
- // The PBRT of the first transaction batch is equal to the last document's resumeToken.
- getMorePBRT = csCursor.getResumeToken();
- assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
-
- // Now get the next batch. This contains the third of the four transaction operations.
- previousGetMorePBRT = getMorePBRT;
- assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
- assert.eq(csCursor.objsLeftInBatch(), 1);
-
- // The clusterTime of this event is the same as the two events from the previous batch, but its
- // resume token is greater than the previous PBRT.
- const txnEvent3 = csCursor.next();
- assert.eq(txnEvent3.clusterTime, txnClusterTime);
- assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
-
- // Because we wrote to the unrelated collection, the final event in the transaction does not
- // appear in the batch. Confirm that the postBatchResumeToken has been set correctly.
+}
+
+// Verify that no events are returned and the PBRT does not advance or go backwards.
+assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
+let getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(initialAggPBRT, getMorePBRT), 0);
+
+// Test that postBatchResumeToken is present on empty initial aggregate batch.
+csCursor = testCollection.watch();
+assert.eq(csCursor.objsLeftInBatch(), 0);
+initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+
+// Test that postBatchResumeToken is present on empty getMore batch.
+assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
+getMorePBRT = csCursor.getResumeToken();
+assert.neq(undefined, getMorePBRT);
+assert.gte(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
+
+// Test that postBatchResumeToken advances with returned events. Insert one document into the
+// collection and consume the resulting change stream event.
+assert.commandWorked(testCollection.insert({_id: docId++}));
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// Because the retrieved event is the most recent entry in the oplog, the PBRT should be equal
+// to the resume token of the last item in the batch and greater than the initial PBRT.
+let resumeTokenFromDoc = csCursor.next()._id;
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, resumeTokenFromDoc), 0);
+assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
+
+// Now seed the collection with enough documents to fit in two batches.
+for (let i = 0; i < batchSize * 2; i++) {
+ assert.commandWorked(testCollection.insert({_id: docId++}));
+}
+
+// Test that the PBRT for a resumed stream is the given resume token if no results are returned.
+csCursor = testCollection.watch([], {resumeAfter: resumeTokenFromDoc, cursor: {batchSize: 0}});
+assert.eq(csCursor.objsLeftInBatch(), 0);
+initialAggPBRT = csCursor.getResumeToken();
+assert.neq(undefined, initialAggPBRT);
+assert.eq(bsonWoCompare(initialAggPBRT, resumeTokenFromDoc), 0);
+
+// Test that postBatchResumeToken advances with getMore. Iterate the cursor and assert that the
+// observed postBatchResumeToken advanced.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+
+// The postBatchResumeToken is again equal to the final token in the batch, and greater than the
+// PBRT from the initial response.
+let eventFromCursor = null;
+while (csCursor.objsLeftInBatch()) {
+ eventFromCursor = csCursor.next();
+ resumeTokenFromDoc = eventFromCursor._id;
+}
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(resumeTokenFromDoc, getMorePBRT), 0);
+assert.gt(bsonWoCompare(getMorePBRT, initialAggPBRT), 0);
+
+// Test that postBatchResumeToken advances with writes to an unrelated collection. First make
+// sure there is nothing left in our cursor, and obtain the latest PBRT...
+while (eventFromCursor.fullDocument._id < (docId - 1)) {
+ assert.soon(() => csCursor.hasNext());
+ eventFromCursor = csCursor.next();
+}
+assert(!csCursor.hasNext());
+let previousGetMorePBRT = csCursor.getResumeToken();
+assert.neq(undefined, previousGetMorePBRT);
+
+// ... then test that it advances on an insert to an unrelated collection.
+assert.commandWorked(otherCollection.insert({}));
+assert.soon(() => {
+ assert(!csCursor.hasNext()); // Causes a getMore to be dispatched.
getMorePBRT = csCursor.getResumeToken();
- assert.gte(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
+ return bsonWoCompare(getMorePBRT, previousGetMorePBRT) > 0;
+});
+
+// Insert two documents into the collection which are of the maximum BSON object size.
+const bsonUserSizeLimit = assert.commandWorked(adminDB.isMaster()).maxBsonObjectSize;
+assert.gt(bsonUserSizeLimit, 0);
+for (let i = 0; i < 2; ++i) {
+ const docToInsert = {_id: docId++, padding: ""};
+ docToInsert.padding = "a".repeat(bsonUserSizeLimit - Object.bsonsize(docToInsert));
+ assert.commandWorked(testCollection.insert(docToInsert));
+}
+
+// Test that we return the correct postBatchResumeToken in the event that the batch hits the
+// byte size limit. Despite the fact that the batchSize is 2, we should only see 1 result,
+// because the second result cannot fit in the batch.
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// Obtain the resume token and the PBRT from the first document.
+resumeTokenFromDoc = csCursor.next()._id;
+getMorePBRT = csCursor.getResumeToken();
+
+// Now retrieve the second event and confirm that the PBRTs and resume tokens are in-order.
+previousGetMorePBRT = getMorePBRT;
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+const resumeTokenFromSecondDoc = csCursor.next()._id;
+getMorePBRT = csCursor.getResumeToken();
+assert.gte(bsonWoCompare(previousGetMorePBRT, resumeTokenFromDoc), 0);
+assert.gt(bsonWoCompare(resumeTokenFromSecondDoc, previousGetMorePBRT), 0);
+assert.gte(bsonWoCompare(getMorePBRT, resumeTokenFromSecondDoc), 0);
+
+// Test that the PBRT is correctly updated when reading events from within a transaction.
+csCursor = testCollection.watch([], {cursor: {batchSize: batchSize}});
+const session = db.getMongo().startSession();
+const sessionDB = session.getDatabase(db.getName());
+
+const sessionColl = sessionDB[testCollection.getName()];
+const sessionOtherColl = sessionDB[otherCollection.getName()];
+session.startTransaction();
+
+// Write 3 documents to testCollection and 1 to the unrelated collection within the transaction.
+for (let i = 0; i < 3; ++i) {
+ assert.commandWorked(sessionColl.insert({_id: docId++}));
+}
+assert.commandWorked(sessionOtherColl.insert({}));
+assert.commandWorked(session.commitTransaction_forTesting());
+session.endSession();
+
+// Grab the next 2 events, which should be the first 2 events in the transaction.
+previousGetMorePBRT = getMorePBRT;
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 2);
+
+// The clusterTime should be the same on each, but the resume token keeps advancing.
+const txnEvent1 = csCursor.next(), txnEvent2 = csCursor.next();
+const txnClusterTime = txnEvent1.clusterTime;
+assert.eq(txnEvent2.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent1._id, previousGetMorePBRT), 0);
+assert.gt(bsonWoCompare(txnEvent2._id, txnEvent1._id), 0);
+
+// The PBRT of the first transaction batch is equal to the last document's resumeToken.
+getMorePBRT = csCursor.getResumeToken();
+assert.eq(bsonWoCompare(getMorePBRT, txnEvent2._id), 0);
+
+// Now get the next batch. This contains the third of the four transaction operations.
+previousGetMorePBRT = getMorePBRT;
+assert.soon(() => csCursor.hasNext()); // Causes a getMore to be dispatched.
+assert.eq(csCursor.objsLeftInBatch(), 1);
+
+// The clusterTime of this event is the same as the two events from the previous batch, but its
+// resume token is greater than the previous PBRT.
+const txnEvent3 = csCursor.next();
+assert.eq(txnEvent3.clusterTime, txnClusterTime);
+assert.gt(bsonWoCompare(txnEvent3._id, previousGetMorePBRT), 0);
+
+// Because we wrote to the unrelated collection, the final event in the transaction does not
+// appear in the batch. Confirm that the postBatchResumeToken has been set correctly.
+getMorePBRT = csCursor.getResumeToken();
+assert.gte(bsonWoCompare(getMorePBRT, txnEvent3._id), 0);
})();
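
The shell machinery behind these assertions is small: every aggregate and getMore response carries a postBatchResumeToken, and getResumeToken() reports the most recent resume point the cursor knows about, roughly the latest PBRT, or the _id of the last event returned mid-batch. A minimal sketch of the checkpoint-and-resume pattern this enables, assuming a replica-set fixture; the collection name is hypothetical:

(function() {
"use strict";

const watched = db.pbrt_demo;  // Hypothetical collection name.

// Even a batch containing no events carries a resume point.
let csCursor = watched.watch([], {cursor: {batchSize: 0}});
assert(!csCursor.hasNext());  // Dispatches a getMore.
const checkpoint = csCursor.getResumeToken();
assert.neq(undefined, checkpoint);
csCursor.close();

// A stream resumed from that checkpoint sees only subsequent writes.
assert.commandWorked(watched.insert({_id: "after-checkpoint"}));
csCursor = watched.watch([], {resumeAfter: checkpoint});
assert.soon(() => csCursor.hasNext());
assert.eq(csCursor.next().fullDocument._id, "after-checkpoint");
csCursor.close();
}());
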
diff --git a/jstests/change_streams/required_as_first_stage.js b/jstests/change_streams/required_as_first_stage.js
index bdc0b43ba0c..2c5128f4865 100644
--- a/jstests/change_streams/required_as_first_stage.js
+++ b/jstests/change_streams/required_as_first_stage.js
@@ -5,50 +5,48 @@
 // recognize the intentionally malformed aggregations that we test here.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
-
- load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
-
- const coll = assertDropAndRecreateCollection(db, "change_stream_required_as_first_stage");
-
- assertErrorCode(coll, [{$match: {z: 34}}, {$changeStream: {}}], 40602);
- assertErrorCode(coll, [{$indexStats: {}}, {$changeStream: {}}], 40602);
- assertErrorCode(
- coll,
- [{$indexStats: {}}, {$changeStream: {}}, {$match: {test: "this is an extra stage"}}],
- 40602);
-
- let error = assert.throws(() => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}]));
- assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
-
- error = assert.throws(
- () => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}], {allowDiskUse: true}));
- assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
-
- error = assert.throws(() => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}]));
- assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
-
- // This one has a different error code because of conflicting host type requirements: the $group
- // needs to merge on a shard, but the $changeStream needs to merge on mongos. This doesn't
-    // happen for the $sort because the mongos portion of the $sort merges streams that are
-    // already sorted, and so won't need disk space.
- error = assert.throws(
- () => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}], {allowDiskUse: true}));
- assert.contains(
- error.code, [40602, ErrorCodes.IllegalOperation], "Unexpected error: " + tojson(error));
-
- // Test that a $changeStream stage is not allowed within a $facet stage.
- assertErrorCode(coll, [{$facet: {testPipe: [{$changeStream: {}}]}}], 40600);
- assertErrorCode(coll,
- [{
- $facet: {
- testPipe: [
- {$indexStats: {}},
- {$changeStream: {}},
- {$match: {test: "this is an extra stage"}}
- ]
- }
- }],
- 40600);
+"use strict";
+
+load("jstests/aggregation/extras/utils.js"); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+
+const coll = assertDropAndRecreateCollection(db, "change_stream_required_as_first_stage");
+
+assertErrorCode(coll, [{$match: {z: 34}}, {$changeStream: {}}], 40602);
+assertErrorCode(coll, [{$indexStats: {}}, {$changeStream: {}}], 40602);
+assertErrorCode(
+ coll,
+ [{$indexStats: {}}, {$changeStream: {}}, {$match: {test: "this is an extra stage"}}],
+ 40602);
+
+let error = assert.throws(() => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}]));
+assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
+
+error = assert.throws(
+ () => coll.aggregate([{$sort: {x: 1}}, {$changeStream: {}}], {allowDiskUse: true}));
+assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
+
+error = assert.throws(() => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}]));
+assert.contains(error.code, [40602, 50988], "Unexpected error: " + tojson(error));
+
+// This one has a different error code because of conflicting host type requirements: the $group
+// needs to merge on a shard, but the $changeStream needs to merge on mongos. This doesn't
+// happen for the $sort because the mongos portion of the $sort merges streams that are
+// already sorted, and so won't need disk space.
+error = assert.throws(
+ () => coll.aggregate([{$group: {_id: "$x"}}, {$changeStream: {}}], {allowDiskUse: true}));
+assert.contains(
+ error.code, [40602, ErrorCodes.IllegalOperation], "Unexpected error: " + tojson(error));
+
+// Test that a $changeStream stage is not allowed within a $facet stage.
+assertErrorCode(coll, [{$facet: {testPipe: [{$changeStream: {}}]}}], 40600);
+assertErrorCode(
+ coll,
+ [{
+ $facet: {
+ testPipe:
+ [{$indexStats: {}}, {$changeStream: {}}, {$match: {test: "this is an extra stage"}}]
+ }
+ }],
+ 40600);
}());
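
The rule enforced here is narrow: $changeStream may only appear as the first stage of a top-level pipeline, so any filtering must come after it, and it can never be nested inside $facet. A minimal sketch of the accepted and rejected shapes, using a hypothetical collection name:

(function() {
"use strict";

const orders = db.first_stage_demo;  // Hypothetical collection name.

// Accepted: $changeStream leads, and later stages filter its output.
const csCursor = orders.aggregate([{$changeStream: {}}, {$match: {operationType: "insert"}}]);
csCursor.close();

// Rejected: a stage before $changeStream, or $changeStream inside $facet;
// both fail at parse time with the error codes asserted above.
assert.throws(() => orders.aggregate([{$match: {z: 34}}, {$changeStream: {}}]));
assert.throws(() => orders.aggregate([{$facet: {pipe: [{$changeStream: {}}]}}]));
}());
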
diff --git a/jstests/change_streams/resume_from_high_water_mark_token.js b/jstests/change_streams/resume_from_high_water_mark_token.js
index 973fc32d9c9..edd34db162b 100644
--- a/jstests/change_streams/resume_from_high_water_mark_token.js
+++ b/jstests/change_streams/resume_from_high_water_mark_token.js
@@ -2,282 +2,265 @@
* Tests that a synthetic high-water-mark (HWM) token obeys the same semantics as a regular token.
*/
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For runCommandChangeStreamPassthroughAware.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For runCommandChangeStreamPassthroughAware.
-    // Drop the test collections to ensure a clean run.
- const collName = jsTestName();
- const otherCollName = "unrelated_" + collName;
- assertDropCollection(db, collName);
- assertDropCollection(db, otherCollName);
+// Drop the test collections to ensure a clean run.
+const collName = jsTestName();
+const otherCollName = "unrelated_" + collName;
+assertDropCollection(db, collName);
+assertDropCollection(db, otherCollName);
- // Helper function to ensure that the specified command is not modified by the passthroughs.
- function runExactCommand(db, cmdObj) {
- const doNotModifyInPassthroughs = true;
- return runCommandChangeStreamPassthroughAware(db, cmdObj, doNotModifyInPassthroughs);
- }
+// Helper function to ensure that the specified command is not modified by the passthroughs.
+function runExactCommand(db, cmdObj) {
+ const doNotModifyInPassthroughs = true;
+ return runCommandChangeStreamPassthroughAware(db, cmdObj, doNotModifyInPassthroughs);
+}
- let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
+let docId = 0; // Tracks _id of documents inserted to ensure that we do not duplicate.
- // Open a stream on the test collection, before the collection has actually been created. Make
- // sure that this command is not modified in the passthroughs, since this behaviour is only
- // relevant for single-collection streams.
- let cmdResBeforeCollExists = assert.commandWorked(
- runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+// Open a stream on the test collection, before the collection has actually been created. Make
+// sure that this command is not modified in the passthroughs, since this behaviour is only
+// relevant for single-collection streams.
+let cmdResBeforeCollExists = assert.commandWorked(
+ runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- // We should be able to retrieve a postBatchResumeToken (PBRT) even with no collection present.
- let csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
- let pbrtBeforeCollExists = csCursor.getResumeToken();
- assert.neq(undefined, pbrtBeforeCollExists);
- csCursor.close();
+// We should be able to retrieve a postBatchResumeToken (PBRT) even with no collection present.
+let csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
+let pbrtBeforeCollExists = csCursor.getResumeToken();
+assert.neq(undefined, pbrtBeforeCollExists);
+csCursor.close();
- // We can resumeAfter and startAfter the token while the collection still does not exist.
- for (let resumeType of["startAfter", "resumeAfter"]) {
- cmdResBeforeCollExists = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {[resumeType]: pbrtBeforeCollExists}},
- {
- $match: {
- $or: [
- {"fullDocument._id": "INSERT_ONE"},
- {"fullDocument._id": "INSERT_TWO"}
- ]
- }
- }
- ],
- cursor: {}
- }));
- }
- csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
-
- // If the collection is then created with a case-insensitive collation, the resumed stream
- // continues to use the simple collation. We see 'INSERT_TWO' but not 'insert_one'.
- const testCollationCollection =
- assertCreateCollection(db, collName, {collation: {locale: "en_US", strength: 2}});
- assert.commandWorked(testCollationCollection.insert({_id: "insert_one"}));
- assert.commandWorked(testCollationCollection.insert({_id: "INSERT_TWO"}));
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
- csCursor.close();
-
- // We can resume from the pre-creation high water mark if we do not specify a collation...
- let cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+// We can resumeAfter and startAfter the token while the collection still does not exist.
+for (let resumeType of ["startAfter", "resumeAfter"]) {
+ cmdResBeforeCollExists = assert.commandWorked(runExactCommand(db, {
aggregate: collName,
pipeline: [
- {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
+ {$changeStream: {[resumeType]: pbrtBeforeCollExists}},
{
- $match:
- {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
+ $match:
+ {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
}
],
cursor: {}
}));
+}
+csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
- // ... but we will not inherit the collection's case-insensitive collation, instead defaulting
- // to the simple collation. We will therefore match 'INSERT_TWO' but not 'insert_one'.
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
- csCursor.close();
+// If the collection is then created with a case-insensitive collation, the resumed stream
+// continues to use the simple collation. We see 'INSERT_TWO' but not 'insert_one'.
+const testCollationCollection =
+ assertCreateCollection(db, collName, {collation: {locale: "en_US", strength: 2}});
+assert.commandWorked(testCollationCollection.insert({_id: "insert_one"}));
+assert.commandWorked(testCollationCollection.insert({_id: "INSERT_TWO"}));
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+csCursor.close();
- // If we do specify a non-simple collation, it will be adopted by the pipeline.
- cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
- {
- $match:
- {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}
- }
- ],
- collation: {locale: "en_US", strength: 2},
- cursor: {}
- }));
+// We can resume from the pre-creation high water mark if we do not specify a collation...
+let cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}}
+ ],
+ cursor: {}
+}));
- // Now we match both 'insert_one' and 'INSERT_TWO'.
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "insert_one"});
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
- csCursor.close();
+// ... but we will not inherit the collection's case-insensitive collation, instead defaulting
+// to the simple collation. We will therefore match 'INSERT_TWO' but not 'insert_one'.
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+csCursor.close();
- // Now open a change stream with batchSize:0 in order to produce a new high water mark.
- const cmdResCollWithCollation = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {}},
- ],
- cursor: {batchSize: 0}
- }));
- csCursor = new DBCommandCursor(db, cmdResCollWithCollation);
- const hwmFromCollWithCollation = csCursor.getResumeToken();
- assert.neq(undefined, hwmFromCollWithCollation);
- csCursor.close();
+// If we do specify a non-simple collation, it will be adopted by the pipeline.
+cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {resumeAfter: pbrtBeforeCollExists}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_ONE"}, {"fullDocument._id": "INSERT_TWO"}]}}
+ ],
+ collation: {locale: "en_US", strength: 2},
+ cursor: {}
+}));
- // Insert two more documents into the collection for testing purposes.
- assert.commandWorked(testCollationCollection.insert({_id: "insert_three"}));
- assert.commandWorked(testCollationCollection.insert({_id: "INSERT_FOUR"}));
+// Now we match both 'insert_one' and 'INSERT_TWO'.
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "insert_one"});
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_TWO"});
+csCursor.close();
- // We can resume the stream on the collection using the HWM...
- const cmdResResumeWithCollation = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [
- {$changeStream: {resumeAfter: hwmFromCollWithCollation}},
- {
- $match: {
- $or: [
- {"fullDocument._id": "INSERT_THREE"},
- {"fullDocument._id": "INSERT_FOUR"}
- ]
- }
- }
- ],
- cursor: {}
- }));
- csCursor = new DBCommandCursor(db, cmdResResumeWithCollation);
+// Now open a change stream with batchSize:0 in order to produce a new high water mark.
+const cmdResCollWithCollation = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {}},
+ ],
+ cursor: {batchSize: 0}
+}));
+csCursor = new DBCommandCursor(db, cmdResCollWithCollation);
+const hwmFromCollWithCollation = csCursor.getResumeToken();
+assert.neq(undefined, hwmFromCollWithCollation);
+csCursor.close();
- // ... but we do not inherit the collection's case-insensitive collation, matching 'INSERT_FOUR'
- // but not the preceding 'insert_three'.
- assert.soon(() => csCursor.hasNext());
- assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_FOUR"});
- csCursor.close();
+// Insert two more documents into the collection for testing purposes.
+assert.commandWorked(testCollationCollection.insert({_id: "insert_three"}));
+assert.commandWorked(testCollationCollection.insert({_id: "INSERT_FOUR"}));
- // Drop the collection and obtain a new pre-creation high water mark. We will use this later.
- assertDropCollection(db, collName);
- cmdResBeforeCollExists = assert.commandWorked(
- runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
- csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
- pbrtBeforeCollExists = csCursor.getResumeToken();
- assert.neq(undefined, pbrtBeforeCollExists);
- csCursor.close();
+// We can resume the stream on the collection using the HWM...
+const cmdResResumeWithCollation = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [
+ {$changeStream: {resumeAfter: hwmFromCollWithCollation}},
+ {$match: {$or: [{"fullDocument._id": "INSERT_THREE"}, {"fullDocument._id": "INSERT_FOUR"}]}}
+ ],
+ cursor: {}
+}));
+csCursor = new DBCommandCursor(db, cmdResResumeWithCollation);
- // Now create each of the test collections with the default simple collation.
- const testCollection = assertCreateCollection(db, collName);
- const otherCollection = assertCreateCollection(db, otherCollName);
- const adminDB = db.getSiblingDB("admin");
+// ... but we do not inherit the collection's case-insensitive collation, matching 'INSERT_FOUR'
+// but not the preceding 'insert_three'.
+assert.soon(() => csCursor.hasNext());
+assert.docEq(csCursor.next().fullDocument, {_id: "INSERT_FOUR"});
+csCursor.close();
- // Open a stream on the test collection, and write a document to it.
- csCursor = testCollection.watch();
- assert.commandWorked(testCollection.insert({_id: docId++}));
+// Drop the collection and obtain a new pre-creation high water mark. We will use this later.
+assertDropCollection(db, collName);
+cmdResBeforeCollExists = assert.commandWorked(
+ runExactCommand(db, {aggregate: collName, pipeline: [{$changeStream: {}}], cursor: {}}));
+csCursor = new DBCommandCursor(db, cmdResBeforeCollExists);
+pbrtBeforeCollExists = csCursor.getResumeToken();
+assert.neq(undefined, pbrtBeforeCollExists);
+csCursor.close();
- // Write an event to the unrelated collection in order to advance the PBRT, and then consume all
-    // events. When we see a PBRT that is greater than the resume token of the last event (stored in
- // 'relatedEvent'), we know it must be a synthetic high-water-mark token.
- //
- // Note that the first insert into the unrelated collection may not be enough to advance the
- // PBRT; some passthroughs will group the unrelated write into a transaction with the related
- // write, giving them the same timestamp. We put the unrelated insert into the assert.soon loop,
- // so that it will eventually get its own transaction with a new timestamp.
- let relatedEvent = null;
- let hwmToken = null;
- assert.soon(() => {
- assert.commandWorked(otherCollection.insert({}));
- if (csCursor.hasNext()) {
- relatedEvent = csCursor.next();
- }
- assert.eq(csCursor.objsLeftInBatch(), 0);
- hwmToken = csCursor.getResumeToken();
- assert.neq(undefined, hwmToken);
- return relatedEvent && bsonWoCompare(hwmToken, relatedEvent._id) > 0;
- });
- csCursor.close();
+// Now create each of the test collections with the default simple collation.
+const testCollection = assertCreateCollection(db, collName);
+const otherCollection = assertCreateCollection(db, otherCollName);
+const adminDB = db.getSiblingDB("admin");
- // Now write some further documents to the collection before attempting to resume.
- for (let i = 0; i < 5; ++i) {
- assert.commandWorked(testCollection.insert({_id: docId++}));
- }
+// Open a stream on the test collection, and write a document to it.
+csCursor = testCollection.watch();
+assert.commandWorked(testCollection.insert({_id: docId++}));
- // We can resumeAfter and startAfter the high water mark. We only see the latest 5 documents.
- for (let resumeType of["startAfter", "resumeAfter"]) {
- csCursor = testCollection.watch([], {[resumeType]: hwmToken});
- assert.soon(() => {
- if (csCursor.hasNext()) {
- relatedEvent = csCursor.next();
- assert.gt(bsonWoCompare(relatedEvent._id, hwmToken), 0);
- // We never see the first document, whose _id was 0.
- assert.gt(relatedEvent.fullDocument._id, 0);
- }
- // The _id of the last document inserted is (docId-1).
- return relatedEvent.fullDocument._id === (docId - 1);
- });
- csCursor.close();
+// Write an event to the unrelated collection in order to advance the PBRT, and then consume all
+// events. When we see a PBRT that is greater than the resume token of the last event (stored in
+// 'relatedEvent'), we know it must be a synthetic high-water-mark token.
+//
+// Note that the first insert into the unrelated collection may not be enough to advance the
+// PBRT; some passthroughs will group the unrelated write into a transaction with the related
+// write, giving them the same timestamp. We put the unrelated insert into the assert.soon loop,
+// so that it will eventually get its own transaction with a new timestamp.
+let relatedEvent = null;
+let hwmToken = null;
+assert.soon(() => {
+ assert.commandWorked(otherCollection.insert({}));
+ if (csCursor.hasNext()) {
+ relatedEvent = csCursor.next();
}
+ assert.eq(csCursor.objsLeftInBatch(), 0);
+ hwmToken = csCursor.getResumeToken();
+ assert.neq(undefined, hwmToken);
+ return relatedEvent && bsonWoCompare(hwmToken, relatedEvent._id) > 0;
+});
+csCursor.close();
- // Now resumeAfter the token that was generated before the collection was created...
- cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
- cursor: {}
- }));
- // ... and confirm that we see all the events that have occurred since then.
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- let docCount = 0;
+// Now write some further documents to the collection before attempting to resume.
+for (let i = 0; i < 5; ++i) {
+ assert.commandWorked(testCollection.insert({_id: docId++}));
+}
+
+// We can resumeAfter and startAfter the high water mark. We only see the latest 5 documents.
+for (let resumeType of ["startAfter", "resumeAfter"]) {
+ csCursor = testCollection.watch([], {[resumeType]: hwmToken});
assert.soon(() => {
if (csCursor.hasNext()) {
relatedEvent = csCursor.next();
- assert.eq(relatedEvent.fullDocument._id, docCount++);
+ assert.gt(bsonWoCompare(relatedEvent._id, hwmToken), 0);
+ // We never see the first document, whose _id was 0.
+ assert.gt(relatedEvent.fullDocument._id, 0);
}
- return docCount === docId;
- });
-
- // Despite the fact that we just resumed from a token which was generated before the collection
- // existed and had no UUID, all subsequent HWMs should now have UUIDs. To test this, we first
- // get the current resume token, then write a document to the unrelated collection. We then wait
- // until the PBRT advances, which means that we now have a new HWM token.
- let hwmPostCreation = csCursor.getResumeToken();
- assert.commandWorked(otherCollection.insert({}));
- assert.soon(() => {
- assert(!csCursor.hasNext());
- return bsonWoCompare(csCursor.getResumeToken(), hwmPostCreation) > 0;
+ // The _id of the last document inserted is (docId-1).
+ return relatedEvent.fullDocument._id === (docId - 1);
});
- hwmPostCreation = csCursor.getResumeToken();
csCursor.close();
+}
- // We can resume from the token if the collection is dropped...
- assertDropCollection(db, collName);
- assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
- cursor: {}
- }));
- // ... or if the collection is recreated with a different UUID...
- assertCreateCollection(db, collName);
- assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
- cursor: {}
- }));
- // ... or if we specify an explicit collation.
- assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
- collation: {locale: "simple"},
- cursor: {}
- }));
+// Now resumeAfter the token that was generated before the collection was created...
+cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
+ cursor: {}
+}));
+// ... and confirm that we see all the events that have occurred since then.
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+let docCount = 0;
+assert.soon(() => {
+ if (csCursor.hasNext()) {
+ relatedEvent = csCursor.next();
+ assert.eq(relatedEvent.fullDocument._id, docCount++);
+ }
+ return docCount === docId;
+});
- // Even after the collection is recreated, we can still resume from the pre-creation HWM...
- cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
- aggregate: collName,
- pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
- cursor: {}
- }));
- // ...and we can still see all the events from the collection's original incarnation...
- csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
- docCount = 0;
- assert.soon(() => {
- if (csCursor.hasNext()) {
- relatedEvent = csCursor.next();
- assert.eq(relatedEvent.fullDocument._id, docCount++);
- }
- return docCount === docId;
- });
- // ... this time followed by an invalidate, as the collection is dropped.
- assert.soon(() => {
- return csCursor.hasNext() && csCursor.next().operationType === "invalidate";
- });
- csCursor.close();
+// Despite the fact that we just resumed from a token which was generated before the collection
+// existed and had no UUID, all subsequent HWMs should now have UUIDs. To test this, we first
+// get the current resume token, then write a document to the unrelated collection. We then wait
+// until the PBRT advances, which means that we now have a new HWM token.
+let hwmPostCreation = csCursor.getResumeToken();
+assert.commandWorked(otherCollection.insert({}));
+assert.soon(() => {
+ assert(!csCursor.hasNext());
+ return bsonWoCompare(csCursor.getResumeToken(), hwmPostCreation) > 0;
+});
+hwmPostCreation = csCursor.getResumeToken();
+csCursor.close();
+
+// We can resume from the token if the collection is dropped...
+assertDropCollection(db, collName);
+assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
+ cursor: {}
+}));
+// ... or if the collection is recreated with a different UUID...
+assertCreateCollection(db, collName);
+assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
+ cursor: {}
+}));
+// ... or if we specify an explicit collation.
+assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: hwmPostCreation}}],
+ collation: {locale: "simple"},
+ cursor: {}
+}));
+
+// Even after the collection is recreated, we can still resume from the pre-creation HWM...
+cmdResResumeFromBeforeCollCreated = assert.commandWorked(runExactCommand(db, {
+ aggregate: collName,
+ pipeline: [{$changeStream: {resumeAfter: pbrtBeforeCollExists}}],
+ cursor: {}
+}));
+// ...and we can still see all the events from the collection's original incarnation...
+csCursor = new DBCommandCursor(db, cmdResResumeFromBeforeCollCreated);
+docCount = 0;
+assert.soon(() => {
+ if (csCursor.hasNext()) {
+ relatedEvent = csCursor.next();
+ assert.eq(relatedEvent.fullDocument._id, docCount++);
+ }
+ return docCount === docId;
+});
+// ... this time followed by an invalidate, as the collection is dropped.
+assert.soon(() => {
+ return csCursor.hasNext() && csCursor.next().operationType === "invalidate";
+});
+csCursor.close();
})(); \ No newline at end of file
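
The property underpinning this whole file is that a change stream always has a resume point: even before any event has been returned, and even before the watched collection exists, the cursor reports a synthetic high-water-mark token, and that token can later be passed to resumeAfter or startAfter exactly like an event's _id. A compact sketch, with the collection name hypothetical and a replica-set fixture assumed:

(function() {
"use strict";

const collName = "hwm_demo";  // Hypothetical; the collection need not exist yet.

// A stream on a non-existent collection still yields a synthetic HWM token.
let csCursor = db[collName].watch();
assert(!csCursor.hasNext());  // Dispatches a getMore.
const hwmToken = csCursor.getResumeToken();
assert.neq(undefined, hwmToken);
csCursor.close();

// Writes made after the HWM are visible to a stream resumed from it.
assert.commandWorked(db[collName].insert({_id: 0}));
csCursor = db[collName].watch([], {startAfter: hwmToken});
assert.soon(() => csCursor.hasNext());
assert.eq(csCursor.next().fullDocument._id, 0);
csCursor.close();
}());
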
diff --git a/jstests/change_streams/shell_helper.js b/jstests/change_streams/shell_helper.js
index 766c425c3f7..b4e8aae00b3 100644
--- a/jstests/change_streams/shell_helper.js
+++ b/jstests/change_streams/shell_helper.js
@@ -7,217 +7,217 @@
// based on the commit oplog entry, which would cause this test to fail.
// @tags: [change_stream_does_not_expect_txns]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/change_stream_util.js"); // For assertInvalidateOp.
- const coll = assertDropAndRecreateCollection(db, "change_stream_shell_helper");
+const coll = assertDropAndRecreateCollection(db, "change_stream_shell_helper");
- assert.commandWorked(db.adminCommand({"setParameter": 1, "logLevel": 5}));
+assert.commandWorked(db.adminCommand({"setParameter": 1, "logLevel": 5}));
- function checkNextChange(cursor, expected) {
- assert.soon(() => cursor.hasNext());
- const nextObj = cursor.next();
- assertChangeStreamEventEq(nextObj, expected);
- return nextObj;
- }
-
- function testCommandIsCalled(testFunc, checkFunc) {
- const mongoRunCommandOriginal = Mongo.prototype.runCommand;
+function checkNextChange(cursor, expected) {
+ assert.soon(() => cursor.hasNext());
+ const nextObj = cursor.next();
+ assertChangeStreamEventEq(nextObj, expected);
+ return nextObj;
+}
- const sentinel = {};
- let cmdObjSeen = sentinel;
+function testCommandIsCalled(testFunc, checkFunc) {
+ const mongoRunCommandOriginal = Mongo.prototype.runCommand;
- Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
- cmdObjSeen = cmdObj;
- return mongoRunCommandOriginal.apply(this, arguments);
- };
+ const sentinel = {};
+ let cmdObjSeen = sentinel;
- try {
- assert.doesNotThrow(testFunc);
- } finally {
- Mongo.prototype.runCommand = mongoRunCommandOriginal;
- }
-
- if (cmdObjSeen === sentinel) {
- throw new Error("Mongo.prototype.runCommand() was never called: " +
- testFunc.toString());
- }
+ Mongo.prototype.runCommand = function runCommandSpy(dbName, cmdObj, options) {
+ cmdObjSeen = cmdObj;
+ return mongoRunCommandOriginal.apply(this, arguments);
+ };
- checkFunc(cmdObjSeen);
+ try {
+ assert.doesNotThrow(testFunc);
+ } finally {
+ Mongo.prototype.runCommand = mongoRunCommandOriginal;
}
- jsTestLog("Testing watch() without options");
- let changeStreamCursor = coll.watch();
-
- assert(!changeStreamCursor.hasNext());
-
- // Write the first document into the collection. We will save the resume token from this change.
- assert.writeOK(coll.insert({_id: 0, x: 1}));
- let resumeToken;
-
- // Test that each of the change stream cursors picks up the change.
- assert.soon(() => changeStreamCursor.hasNext());
- let change = changeStreamCursor.next();
- assert(!changeStreamCursor.hasNext());
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, x: 1},
- ns: {db: "test", coll: coll.getName()},
- operationType: "insert",
- };
- assert("_id" in change, "Got unexpected change: " + tojson(change));
- // Remember the _id of the first op to resume the stream.
- resumeToken = change._id;
- // Remove the fields we cannot predict, then test that the change is as expected.
- delete change._id;
- delete change.clusterTime;
- assert.docEq(change, expected);
-
- jsTestLog("Testing watch() with pipeline");
- changeStreamCursor = coll.watch([{$project: {clusterTime: 1, docId: "$documentKey._id"}}]);
-
- // Store the cluster time of the insert as the timestamp to start from.
- const resumeTime =
- assert.commandWorked(db.runCommand({insert: coll.getName(), documents: [{_id: 1, x: 1}]}))
- .operationTime;
- jsTestLog("Insert of document with _id 1 got operationTime " + tojson(resumeTime));
-
- const changeForInsert = checkNextChange(changeStreamCursor, {docId: 1});
- jsTestLog("Change stream event for document with _id 1 reports clusterTime " +
- tojson(changeForInsert.clusterTime));
-
- // We expect the clusterTime returned by the change stream event and the operationTime returned
- // by the insert to be the same.
- assert.eq(changeForInsert.clusterTime, resumeTime);
-
- jsTestLog("Testing watch() with pipeline and resumeAfter");
- changeStreamCursor =
- coll.watch([{$project: {docId: "$documentKey._id"}}], {resumeAfter: resumeToken});
- checkNextChange(changeStreamCursor, {docId: 1});
-
- jsTestLog("Testing watch() with pipeline and startAfter");
- changeStreamCursor =
- coll.watch([{$project: {docId: "$documentKey._id"}}], {startAfter: resumeToken});
- checkNextChange(changeStreamCursor, {docId: 1});
-
- jsTestLog("Testing watch() with pipeline and startAtOperationTime");
- changeStreamCursor =
- coll.watch([{$project: {docId: "$documentKey._id"}}], {startAtOperationTime: resumeTime});
- checkNextChange(changeStreamCursor, {docId: 1});
-
- jsTestLog("Testing watch() with updateLookup");
- changeStreamCursor = coll.watch([], {fullDocument: "updateLookup"});
-
- assert.writeOK(coll.update({_id: 0}, {$set: {x: 10}}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, x: 10},
- ns: {db: "test", coll: coll.getName()},
- operationType: "update",
- updateDescription: {removedFields: [], updatedFields: {x: 10}},
- };
- checkNextChange(changeStreamCursor, expected);
-
- jsTestLog("Testing watch() with batchSize");
- // Only test mongod because mongos uses batch size 0 for aggregate commands internally to
- // establish cursors quickly. GetMore on mongos doesn't respect batch size due to SERVER-31992.
- const isMongos = FixtureHelpers.isMongos(db);
- if (!isMongos) {
-        // Increment a field five times and verify that the batch size is respected.
- for (let i = 0; i < 5; i++) {
- assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
- }
+ if (cmdObjSeen === sentinel) {
+ throw new Error("Mongo.prototype.runCommand() was never called: " + testFunc.toString());
+ }
- // Only watch the "update" changes of the specific doc since the beginning.
- changeStreamCursor =
- coll.watch([{$match: {documentKey: {_id: 1}, operationType: "update"}}],
- {resumeAfter: resumeToken, batchSize: 2});
-
- // Check the first batch.
- assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
- // Consume the first batch.
- assert(changeStreamCursor.hasNext());
- changeStreamCursor.next();
- assert(changeStreamCursor.hasNext());
- changeStreamCursor.next();
- // Confirm that the batch is empty.
- assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
-
- // Check the batch returned by getMore.
- assert(changeStreamCursor.hasNext());
- assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
- changeStreamCursor.next();
- assert(changeStreamCursor.hasNext());
- changeStreamCursor.next();
- assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
- // There are more changes coming, just not in the batch.
- assert(changeStreamCursor.hasNext());
+ checkFunc(cmdObjSeen);
+}
+
+jsTestLog("Testing watch() without options");
+let changeStreamCursor = coll.watch();
+
+assert(!changeStreamCursor.hasNext());
+
+// Write the first document into the collection. We will save the resume token from this change.
+assert.writeOK(coll.insert({_id: 0, x: 1}));
+let resumeToken;
+
+// Test that each of the change stream cursors picks up the change.
+assert.soon(() => changeStreamCursor.hasNext());
+let change = changeStreamCursor.next();
+assert(!changeStreamCursor.hasNext());
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, x: 1},
+ ns: {db: "test", coll: coll.getName()},
+ operationType: "insert",
+};
+assert("_id" in change, "Got unexpected change: " + tojson(change));
+// Remember the _id of the first op to resume the stream.
+resumeToken = change._id;
+// Remove the fields we cannot predict, then test that the change is as expected.
+delete change._id;
+delete change.clusterTime;
+assert.docEq(change, expected);
+
+jsTestLog("Testing watch() with pipeline");
+changeStreamCursor = coll.watch([{$project: {clusterTime: 1, docId: "$documentKey._id"}}]);
+
+// Store the cluster time of the insert as the timestamp to start from.
+const resumeTime =
+ assert.commandWorked(db.runCommand({insert: coll.getName(), documents: [{_id: 1, x: 1}]}))
+ .operationTime;
+jsTestLog("Insert of document with _id 1 got operationTime " + tojson(resumeTime));
+
+const changeForInsert = checkNextChange(changeStreamCursor, {docId: 1});
+jsTestLog("Change stream event for document with _id 1 reports clusterTime " +
+ tojson(changeForInsert.clusterTime));
+
+// We expect the clusterTime returned by the change stream event and the operationTime returned
+// by the insert to be the same.
+assert.eq(changeForInsert.clusterTime, resumeTime);
+
+jsTestLog("Testing watch() with pipeline and resumeAfter");
+changeStreamCursor =
+ coll.watch([{$project: {docId: "$documentKey._id"}}], {resumeAfter: resumeToken});
+checkNextChange(changeStreamCursor, {docId: 1});
+
+jsTestLog("Testing watch() with pipeline and startAfter");
+changeStreamCursor =
+ coll.watch([{$project: {docId: "$documentKey._id"}}], {startAfter: resumeToken});
+checkNextChange(changeStreamCursor, {docId: 1});
+
+jsTestLog("Testing watch() with pipeline and startAtOperationTime");
+changeStreamCursor =
+ coll.watch([{$project: {docId: "$documentKey._id"}}], {startAtOperationTime: resumeTime});
+checkNextChange(changeStreamCursor, {docId: 1});
+
+jsTestLog("Testing watch() with updateLookup");
+changeStreamCursor = coll.watch([], {fullDocument: "updateLookup"});
+
+assert.writeOK(coll.update({_id: 0}, {$set: {x: 10}}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, x: 10},
+ ns: {db: "test", coll: coll.getName()},
+ operationType: "update",
+ updateDescription: {removedFields: [], updatedFields: {x: 10}},
+};
+checkNextChange(changeStreamCursor, expected);
+
+jsTestLog("Testing watch() with batchSize");
+// Only test mongod because mongos uses batch size 0 for aggregate commands internally to
+// establish cursors quickly. GetMore on mongos doesn't respect batch size due to SERVER-31992.
+const isMongos = FixtureHelpers.isMongos(db);
+if (!isMongos) {
+    // Increment a field five times and verify that the batch size is respected.
+ for (let i = 0; i < 5; i++) {
+ assert.writeOK(coll.update({_id: 1}, {$inc: {x: 1}}));
}
- jsTestLog("Testing watch() with maxAwaitTimeMS");
- changeStreamCursor = coll.watch([], {maxAwaitTimeMS: 500});
- testCommandIsCalled(() => assert(!changeStreamCursor.hasNext()), (cmdObj) => {
- assert.eq("getMore",
- Object.keys(cmdObj)[0],
- "expected getMore command, but was: " + tojson(cmdObj));
- assert(cmdObj.hasOwnProperty("maxTimeMS"), "unexpected getMore command: " + tojson(cmdObj));
- assert.eq(500, cmdObj.maxTimeMS, "unexpected getMore command: " + tojson(cmdObj));
+ // Only watch the "update" changes of the specific doc since the beginning.
+ changeStreamCursor = coll.watch([{$match: {documentKey: {_id: 1}, operationType: "update"}}],
+ {resumeAfter: resumeToken, batchSize: 2});
+
+ // Check the first batch.
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
+ // Consume the first batch.
+ assert(changeStreamCursor.hasNext());
+ changeStreamCursor.next();
+ assert(changeStreamCursor.hasNext());
+ changeStreamCursor.next();
+ // Confirm that the batch is empty.
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
+
+ // Check the batch returned by getMore.
+ assert(changeStreamCursor.hasNext());
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 2);
+ changeStreamCursor.next();
+ assert(changeStreamCursor.hasNext());
+ changeStreamCursor.next();
+ assert.eq(changeStreamCursor.objsLeftInBatch(), 0);
+ // There are more changes coming, just not in the batch.
+ assert(changeStreamCursor.hasNext());
+}
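
The loop above relies on a batching contract worth spelling out: on mongod, batchSize caps how many events arrive per server reply, and draining the local batch makes the next hasNext() issue a getMore. A minimal restatement (names illustrative):

    const bsCursor = coll.watch([], {batchSize: 2});
    for (let i = 0; i < 3; i++) {
        assert.writeOK(coll.insert({_id: "bs" + i}));
    }
    assert.soon(() => bsCursor.hasNext());
    // Each reply holds at most two events, regardless of how many are pending.
    assert.lte(bsCursor.objsLeftInBatch(), 2);
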
+
+jsTestLog("Testing watch() with maxAwaitTimeMS");
+changeStreamCursor = coll.watch([], {maxAwaitTimeMS: 500});
+testCommandIsCalled(() => assert(!changeStreamCursor.hasNext()), (cmdObj) => {
+ assert.eq(
+ "getMore", Object.keys(cmdObj)[0], "expected getMore command, but was: " + tojson(cmdObj));
+ assert(cmdObj.hasOwnProperty("maxTimeMS"), "unexpected getMore command: " + tojson(cmdObj));
+ assert.eq(500, cmdObj.maxTimeMS, "unexpected getMore command: " + tojson(cmdObj));
+});
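
As the interception above shows, maxAwaitTimeMS is forwarded as maxTimeMS on each getMore, bounding how long an idle stream blocks rather than how long the cursor may live. A sketch of the observable effect, with a deliberately loose timing bound:

    const idleCursor = coll.watch([], {maxAwaitTimeMS: 200});
    const started = Date.now();
    assert(!idleCursor.hasNext());  // nothing was written, so the batch is empty
    // The call returns once the server-side wait expires instead of hanging.
    assert.lt(Date.now() - started, 60 * 1000, "hasNext() should return promptly");
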
+
+jsTestLog("Testing the cursor gets closed when the collection gets dropped");
+changeStreamCursor = coll.watch([{$project: {clusterTime: 0}}]);
+assert.writeOK(coll.insert({_id: 2, x: 1}));
+expected = {
+ documentKey: {_id: 2},
+ fullDocument: {_id: 2, x: 1},
+ ns: {db: "test", coll: coll.getName()},
+ operationType: "insert",
+};
+checkNextChange(changeStreamCursor, expected);
+assert(!changeStreamCursor.hasNext());
+assert(!changeStreamCursor.isClosed());
+assert(!changeStreamCursor.isExhausted());
+
+// Dropping the collection should trigger a drop notification.
+assertDropCollection(db, coll.getName());
+assert.soon(() => changeStreamCursor.hasNext());
+assert(!changeStreamCursor.isExhausted());
+expected = {
+ operationType: "drop",
+ ns: {db: db.getName(), coll: coll.getName()}
+};
+checkNextChange(changeStreamCursor, expected);
+// For single collection change streams, the drop should invalidate the stream.
+const invalidateDoc = assertInvalidateOp({cursor: changeStreamCursor, opType: "drop"});
+
+if (invalidateDoc) {
+ jsTestLog("Testing using the 'startAfter' option from the invalidate entry");
+ assert.commandWorked(coll.insert({_id: "After drop"}));
+ let resumedFromInvalidate =
+ coll.watch([], {startAfter: invalidateDoc._id, collation: {locale: "simple"}});
+
+ // We should see the new insert after starting over. However, in sharded cluster
+ // passthroughs we may see more drop and invalidate notifications before we see the insert.
+ let firstChangeAfterDrop;
+ assert.soon(() => {
+ if (!resumedFromInvalidate.hasNext()) {
+ return false;
+ }
+ const next = resumedFromInvalidate.next();
+ if (next.operationType == "invalidate") {
+ // Start again!
+ resumedFromInvalidate =
+ coll.watch([], {startAfter: next._id, collation: {locale: "simple"}});
+ return false;
+ }
+ if (next.operationType == "drop") {
+ return false;
+ }
+ // THIS is the change we wanted.
+ firstChangeAfterDrop = next;
+ return true;
});
- jsTestLog("Testing the cursor gets closed when the collection gets dropped");
- changeStreamCursor = coll.watch([{$project: {clusterTime: 0}}]);
- assert.writeOK(coll.insert({_id: 2, x: 1}));
- expected = {
- documentKey: {_id: 2},
- fullDocument: {_id: 2, x: 1},
- ns: {db: "test", coll: coll.getName()},
- operationType: "insert",
- };
- checkNextChange(changeStreamCursor, expected);
- assert(!changeStreamCursor.hasNext());
- assert(!changeStreamCursor.isClosed());
- assert(!changeStreamCursor.isExhausted());
-
- // Dropping the collection should trigger a drop notification.
- assertDropCollection(db, coll.getName());
- assert.soon(() => changeStreamCursor.hasNext());
- assert(!changeStreamCursor.isExhausted());
- expected = {operationType: "drop", ns: {db: db.getName(), coll: coll.getName()}};
- checkNextChange(changeStreamCursor, expected);
- // For single collection change streams, the drop should invalidate the stream.
- const invalidateDoc = assertInvalidateOp({cursor: changeStreamCursor, opType: "drop"});
-
- if (invalidateDoc) {
- jsTestLog("Testing using the 'startAfter' option from the invalidate entry");
- assert.commandWorked(coll.insert({_id: "After drop"}));
- let resumedFromInvalidate =
- coll.watch([], {startAfter: invalidateDoc._id, collation: {locale: "simple"}});
-
- // We should see the new insert after starting over. However, in sharded cluster
- // passthroughs we may see more drop and invalidate notifications before we see the insert.
- let firstChangeAfterDrop;
- assert.soon(() => {
- if (!resumedFromInvalidate.hasNext()) {
- return false;
- }
- const next = resumedFromInvalidate.next();
- if (next.operationType == "invalidate") {
- // Start again!
- resumedFromInvalidate =
- coll.watch([], {startAfter: next._id, collation: {locale: "simple"}});
- return false;
- }
- if (next.operationType == "drop") {
- return false;
- }
- // THIS is the change we wanted.
- firstChangeAfterDrop = next;
- return true;
- });
-
-        assert.eq(firstChangeAfterDrop.documentKey._id, "After drop", tojson(firstChangeAfterDrop));
- }
+    assert.eq(firstChangeAfterDrop.documentKey._id, "After drop", tojson(firstChangeAfterDrop));
+}
}());
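
The 'startAfter' dance above exists because, on servers that enforce the invalidate-token restriction (4.2-era behavior), an invalidate token is rejected by 'resumeAfter' but accepted by 'startAfter', which opens a fresh stream at that point. A sketch using the invalidateDoc captured in the test:

    // resumeAfter cannot pick up from an invalidate event...
    assert.throws(() => coll.watch([], {resumeAfter: invalidateDoc._id}));
    // ...but startAfter can.
    const fresh = coll.watch([], {startAfter: invalidateDoc._id});
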
diff --git a/jstests/change_streams/start_at_cluster_time.js b/jstests/change_streams/start_at_cluster_time.js
index 484de4c43c2..2edcb530e20 100644
--- a/jstests/change_streams/start_at_cluster_time.js
+++ b/jstests/change_streams/start_at_cluster_time.js
@@ -1,80 +1,79 @@
// Tests resuming change streams based on cluster time.
(function() {
- "use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+"use strict";
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, jsTestName());
+const coll = assertDropAndRecreateCollection(db, jsTestName());
- const testStartTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
+const testStartTime = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- // Write a document to each chunk, and wait for replication.
- assert.writeOK(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
- assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+// Write a document to each chunk, and wait for replication.
+assert.writeOK(coll.insert({_id: -1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- // Perform two updates, then use a change stream to capture the cluster time of the first update
- // to be resumed from.
- const streamToFindClusterTime = coll.watch();
- assert.writeOK(coll.update({_id: -1}, {$set: {updated: true}}));
- assert.writeOK(coll.update({_id: 1}, {$set: {updated: true}}));
- assert.soon(() => streamToFindClusterTime.hasNext());
- let next = streamToFindClusterTime.next();
- assert.eq(next.operationType, "update");
- assert.eq(next.documentKey, {_id: -1});
- const timeOfFirstUpdate = next.clusterTime;
+// Perform two updates, then use a change stream to capture the cluster time of the first update
+// to be resumed from.
+const streamToFindClusterTime = coll.watch();
+assert.writeOK(coll.update({_id: -1}, {$set: {updated: true}}));
+assert.writeOK(coll.update({_id: 1}, {$set: {updated: true}}));
+assert.soon(() => streamToFindClusterTime.hasNext());
+let next = streamToFindClusterTime.next();
+assert.eq(next.operationType, "update");
+assert.eq(next.documentKey, {_id: -1});
+const timeOfFirstUpdate = next.clusterTime;
- let changeStream = coll.watch([], {startAtOperationTime: timeOfFirstUpdate});
+let changeStream = coll.watch([], {startAtOperationTime: timeOfFirstUpdate});
- // Test that starting at the cluster time is inclusive of the first update, so we should see
- // both updates in the new stream.
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, -1, tojson(next));
+// Test that starting at the cluster time is inclusive of the first update, so we should see
+// both updates in the new stream.
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, -1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, 1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, 1, tojson(next));
- // Test that startAtOperationTime is not allowed alongside resumeAfter.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: coll.getName(),
- pipeline:
- [{$changeStream: {startAtOperationTime: timeOfFirstUpdate, resumeAfter: next._id}}],
- cursor: {}
- }),
- 40674);
+// Test that startAtOperationTime is not allowed alongside resumeAfter.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: coll.getName(),
+ pipeline: [{$changeStream: {startAtOperationTime: timeOfFirstUpdate, resumeAfter: next._id}}],
+ cursor: {}
+}),
+ 40674);
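
The same mutual exclusion holds when the options are passed through the shell helper, since watch() builds the identical $changeStream stage. A one-line sketch:

    assert.throws(
        () => coll.watch([], {startAtOperationTime: timeOfFirstUpdate, resumeAfter: next._id}));
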
- // Test that resuming from a time in the future will wait for that time to come.
- let resumeTimeFarFuture = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
- resumeTimeFarFuture =
- new Timestamp(resumeTimeFarFuture.getTime() + 60 * 60 * 6, 1); // 6 hours in the future
+// Test that resuming from a time in the future will wait for that time to come.
+let resumeTimeFarFuture = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
+resumeTimeFarFuture =
+ new Timestamp(resumeTimeFarFuture.getTime() + 60 * 60 * 6, 1); // 6 hours in the future
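
The arithmetic here leans on Timestamp(seconds, increment) counting whole seconds, so "six hours in the future" is simply 6 * 60 * 60 = 21600 seconds added to the current cluster time:

    const nowTs = db.runCommand({isMaster: 1}).$clusterTime.clusterTime;
    const sixHoursAhead = new Timestamp(nowTs.getTime() + 21600, 1);
    assert.eq(sixHoursAhead.getTime() - nowTs.getTime(), 6 * 60 * 60);
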
- let changeStreamFuture = coll.watch([], {startAtOperationTime: resumeTimeFarFuture});
+let changeStreamFuture = coll.watch([], {startAtOperationTime: resumeTimeFarFuture});
- // Resume the change stream from the start of the test and verify it picks up the changes to the
- // collection. Namely, it should see two inserts followed by two updates.
- changeStream = coll.watch([], {startAtOperationTime: testStartTime});
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert", tojson(next));
- assert.eq(next.documentKey._id, -1, tojson(next));
+// Resume the change stream from the start of the test and verify it picks up the changes to the
+// collection. Namely, it should see two inserts followed by two updates.
+changeStream = coll.watch([], {startAtOperationTime: testStartTime});
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert", tojson(next));
+assert.eq(next.documentKey._id, -1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "insert", tojson(next));
- assert.eq(next.documentKey._id, 1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "insert", tojson(next));
+assert.eq(next.documentKey._id, 1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, -1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, -1, tojson(next));
- assert.soon(() => changeStream.hasNext());
- next = changeStream.next();
- assert.eq(next.operationType, "update", tojson(next));
- assert.eq(next.documentKey._id, 1, tojson(next));
+assert.soon(() => changeStream.hasNext());
+next = changeStream.next();
+assert.eq(next.operationType, "update", tojson(next));
+assert.eq(next.documentKey._id, 1, tojson(next));
- // Verify that the change stream resumed from far into the future does not see any changes.
- assert(!changeStreamFuture.hasNext());
+// Verify that the change stream resumed from far into the future does not see any changes.
+assert(!changeStreamFuture.hasNext());
})();
diff --git a/jstests/change_streams/whitelist.js b/jstests/change_streams/whitelist.js
index 6b86604f8a8..cd5716b9073 100644
--- a/jstests/change_streams/whitelist.js
+++ b/jstests/change_streams/whitelist.js
@@ -3,31 +3,31 @@
*/
(function() {
- "use strict";
+"use strict";
- load('jstests/aggregation/extras/utils.js'); // For assertErrorCode.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load('jstests/aggregation/extras/utils.js'); // For assertErrorCode.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- const coll = assertDropAndRecreateCollection(db, "change_stream_whitelist");
+const coll = assertDropAndRecreateCollection(db, "change_stream_whitelist");
- // Bare-bones $changeStream pipeline which will be augmented during tests.
- const changeStream = [{$changeStream: {}}];
+// Bare-bones $changeStream pipeline which will be augmented during tests.
+const changeStream = [{$changeStream: {}}];
- // List of non-$changeStream stages which are explicitly whitelisted.
- const whitelist = [
- {$match: {_id: {$exists: true}}},
- {$project: {_id: 1}},
- {$addFields: {newField: 1}},
- {$set: {newField: 1}},
- {$replaceRoot: {newRoot: {_id: "$_id"}}},
- {$replaceWith: {_id: "$_id"}},
- {$redact: "$$DESCEND"}
- ];
+// List of non-$changeStream stages which are explicitly whitelisted.
+const whitelist = [
+ {$match: {_id: {$exists: true}}},
+ {$project: {_id: 1}},
+ {$addFields: {newField: 1}},
+ {$set: {newField: 1}},
+ {$replaceRoot: {newRoot: {_id: "$_id"}}},
+ {$replaceWith: {_id: "$_id"}},
+ {$redact: "$$DESCEND"}
+];
- // List of stages which the whitelist mechanism will prevent from running in a $changeStream.
- // Does not include stages which are blacklisted but already implicitly prohibited, e.g. both
- // $currentOp and $changeStream must be the first stage in a pipeline.
- const blacklist = [
+// List of stages which the whitelist mechanism will prevent from running in a $changeStream.
+// Does not include stages which are blacklisted but already implicitly prohibited, e.g. both
+// $currentOp and $changeStream must be the first stage in a pipeline.
+const blacklist = [
{$group: {_id: "$_id"}},
{$sort: {_id: 1}},
{$skip: 100},
@@ -48,18 +48,18 @@
{$facet: {facetPipe: [{$match: {_id: {$exists: true}}}]}}
];
- // Verify that each of the whitelisted stages are permitted to run in a $changeStream.
- for (let allowedStage of whitelist) {
- assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(allowedStage), cursor: {}}));
- }
-
- // Verify that all of the whitelisted stages are able to run in a $changeStream together.
+// Verify that each of the whitelisted stages are permitted to run in a $changeStream.
+for (let allowedStage of whitelist) {
assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: changeStream.concat(whitelist), cursor: {}}));
+ {aggregate: coll.getName(), pipeline: changeStream.concat(allowedStage), cursor: {}}));
+}
+
+// Verify that all of the whitelisted stages are able to run in a $changeStream together.
+assert.commandWorked(db.runCommand(
+ {aggregate: coll.getName(), pipeline: changeStream.concat(whitelist), cursor: {}}));
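
Concretely, each iteration of the loop above issues a pipeline of the following shape, shown here with $project as the representative whitelisted stage:

    assert.commandWorked(db.runCommand({
        aggregate: coll.getName(),
        pipeline: [{$changeStream: {}}, {$project: {_id: 1}}],
        cursor: {}
    }));
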
- // Verify that a $changeStream pipeline fails to validate if a blacklisted stage is present.
- for (let bannedStage of blacklist) {
- assertErrorCode(coll, changeStream.concat(bannedStage), ErrorCodes.IllegalOperation);
- }
+// Verify that a $changeStream pipeline fails to validate if a blacklisted stage is present.
+for (let bannedStage of blacklist) {
+ assertErrorCode(coll, changeStream.concat(bannedStage), ErrorCodes.IllegalOperation);
+}
}());
diff --git a/jstests/change_streams/whole_cluster.js b/jstests/change_streams/whole_cluster.js
index b95453d12bc..7d2d3f22dbb 100644
--- a/jstests/change_streams/whole_cluster.js
+++ b/jstests/change_streams/whole_cluster.js
@@ -1,133 +1,133 @@
// Basic tests for $changeStream against all databases in the cluster.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
- // assert[Valid|Invalid]ChangeStreamNss.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- db = db.getSiblingDB(jsTestName());
- const adminDB = db.getSiblingDB("admin");
- const otherDB = db.getSiblingDB(jsTestName() + "_other");
+db = db.getSiblingDB(jsTestName());
+const adminDB = db.getSiblingDB("admin");
+const otherDB = db.getSiblingDB(jsTestName() + "_other");
- // Drop and recreate the collections to be used in this set of tests.
- assertDropAndRecreateCollection(db, "t1");
- assertDropAndRecreateCollection(otherDB, "t2");
+// Drop and recreate the collections to be used in this set of tests.
+assertDropAndRecreateCollection(db, "t1");
+assertDropAndRecreateCollection(otherDB, "t2");
- // Test that a change stream can be opened on the admin database if {allChangesForCluster:true}
- // is specified.
- assertValidChangeStreamNss("admin", 1, {allChangesForCluster: true});
- // Test that a change stream cannot be opened on the admin database if a collection is
- // specified, even with {allChangesForCluster:true}.
- assertInvalidChangeStreamNss("admin", "testcoll", {allChangesForCluster: true});
- // Test that a change stream cannot be opened on a database other than admin if
- // {allChangesForCluster:true} is specified.
- assertInvalidChangeStreamNss(db.getName(), 1, {allChangesForCluster: true});
+// Test that a change stream can be opened on the admin database if {allChangesForCluster:true}
+// is specified.
+assertValidChangeStreamNss("admin", 1, {allChangesForCluster: true});
+// Test that a change stream cannot be opened on the admin database if a collection is
+// specified, even with {allChangesForCluster:true}.
+assertInvalidChangeStreamNss("admin", "testcoll", {allChangesForCluster: true});
+// Test that a change stream cannot be opened on a database other than admin if
+// {allChangesForCluster:true} is specified.
+assertInvalidChangeStreamNss(db.getName(), 1, {allChangesForCluster: true});
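
For reference, the helper used below boils down to a single aggregate against the admin database, with the collection argument set to 1 (database-level) and allChangesForCluster enabled:

    assert.commandWorked(adminDB.runCommand({
        aggregate: 1,
        pipeline: [{$changeStream: {allChangesForCluster: true}}],
        cursor: {}
    }));
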
- let cst = new ChangeStreamTest(adminDB);
- let cursor = cst.startWatchingAllChangesForCluster();
+let cst = new ChangeStreamTest(adminDB);
+let cursor = cst.startWatchingAllChangesForCluster();
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
- // Test that the change stream returns an inserted doc.
- assert.writeOK(db.t1.insert({_id: 0, a: 1}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns an inserted doc.
+assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns another inserted doc in a different database.
- assert.writeOK(otherDB.t2.insert({_id: 0, a: 2}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 2},
- ns: {db: otherDB.getName(), coll: "t2"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns another inserted doc in a different database.
+assert.writeOK(otherDB.t2.insert({_id: 0, a: 2}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 2},
+ ns: {db: otherDB.getName(), coll: "t2"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns an inserted doc on a user-created database whose name
- // includes 'admin', 'local', or 'config'.
- const validUserDBs = [
- "admin1",
- "1admin",
- "_admin_",
- "local_",
- "_local",
- "_local_",
- "config_",
- "_config",
- "_config_"
+// Test that the change stream returns an inserted doc on a user-created database whose name
+// includes 'admin', 'local', or 'config'.
+const validUserDBs = [
+ "admin1",
+ "1admin",
+ "_admin_",
+ "local_",
+ "_local",
+ "_local_",
+ "config_",
+ "_config",
+ "_config_"
+];
+validUserDBs.forEach(dbName => {
+ assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
+ expected = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: dbName, coll: "test"},
+ operationType: "insert",
+ },
];
- validUserDBs.forEach(dbName => {
- assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
- expected = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: dbName, coll: "test"},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- });
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+});
- // Test that the change stream returns an inserted doc on a user-created collection whose name
- // includes "system" but is not considered an internal collection.
- const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
- validSystemColls.forEach(collName => {
- assert.writeOK(db.getCollection(collName).insert({_id: 0, a: 1}));
- expected = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: collName},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- });
+// Test that the change stream returns an inserted doc on a user-created collection whose name
+// includes "system" but is not considered an internal collection.
+const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
+validSystemColls.forEach(collName => {
+ assert.writeOK(db.getCollection(collName).insert({_id: 0, a: 1}));
+ expected = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: collName},
+ operationType: "insert",
+ },
+ ];
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+});
- // Test that the change stream filters out operations on any collection in the 'admin', 'local',
- // or 'config' databases.
- const filteredDBs = ["admin", "local", "config"];
- filteredDBs.forEach(dbName => {
- // Not allowed to use 'local' db through mongos.
- if (FixtureHelpers.isMongos(db) && dbName == "local")
- return;
+// Test that the change stream filters out operations on any collection in the 'admin', 'local',
+// or 'config' databases.
+const filteredDBs = ["admin", "local", "config"];
+filteredDBs.forEach(dbName => {
+ // Not allowed to use 'local' db through mongos.
+ if (FixtureHelpers.isMongos(db) && dbName == "local")
+ return;
- assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
- // Insert to the test collection to ensure that the change stream has something to
- // return.
- assert.writeOK(db.t1.insert({_id: dbName}));
- expected = [
- {
- documentKey: {_id: dbName},
- fullDocument: {_id: dbName},
- ns: {db: db.getName(), coll: "t1"},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- // Drop the test collection to avoid duplicate key errors if this test is run multiple
- // times.
- assertDropCollection(db.getSiblingDB(dbName), "test");
- });
+ assert.writeOK(db.getSiblingDB(dbName).test.insert({_id: 0, a: 1}));
+ // Insert to the test collection to ensure that the change stream has something to
+ // return.
+ assert.writeOK(db.t1.insert({_id: dbName}));
+ expected = [
+ {
+ documentKey: {_id: dbName},
+ fullDocument: {_id: dbName},
+ ns: {db: db.getName(), coll: "t1"},
+ operationType: "insert",
+ },
+ ];
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+ // Drop the test collection to avoid duplicate key errors if this test is run multiple
+ // times.
+ assertDropCollection(db.getSiblingDB(dbName), "test");
+});
- // Dropping a database should generate drop entries for each collection followed by a database
- // drop.
- assert.commandWorked(otherDB.dropDatabase());
- cst.assertDatabaseDrop({cursor: cursor, db: otherDB});
+// Dropping a database should generate drop entries for each collection followed by a database
+// drop.
+assert.commandWorked(otherDB.dropDatabase());
+cst.assertDatabaseDrop({cursor: cursor, db: otherDB});
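
assertDatabaseDrop encapsulates the expected sequence: one "drop" event per collection in the dropped database, then a single "dropDatabase". A hand-rolled sketch of the same check (hypothetical, and not meant to run alongside the helper, since both consume the same events):

    let evt = cst.getOneChange(cursor);
    while (evt.operationType === "drop") {
        assert.eq(evt.ns.db, otherDB.getName(), tojson(evt));
        evt = cst.getOneChange(cursor);
    }
    assert.eq(evt.operationType, "dropDatabase", tojson(evt));
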
- // Drop the remaining databases and clean up the test.
- assert.commandWorked(db.dropDatabase());
- validUserDBs.forEach(dbName => {
- db.getSiblingDB(dbName).dropDatabase();
- });
- cst.cleanUp();
+// Drop the remaining databases and clean up the test.
+assert.commandWorked(db.dropDatabase());
+validUserDBs.forEach(dbName => {
+ db.getSiblingDB(dbName).dropDatabase();
+});
+cst.cleanUp();
}());
diff --git a/jstests/change_streams/whole_cluster_metadata_notifications.js b/jstests/change_streams/whole_cluster_metadata_notifications.js
index ec7da470842..9a9d8c6efd5 100644
--- a/jstests/change_streams/whole_cluster_metadata_notifications.js
+++ b/jstests/change_streams/whole_cluster_metadata_notifications.js
@@ -1,280 +1,276 @@
// Tests of metadata notifications for a $changeStream on a whole cluster.
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- // Define two databases. We will conduct our tests by creating one collection in each.
- const testDB1 = db.getSiblingDB(jsTestName()),
- testDB2 = db.getSiblingDB(jsTestName() + "_other");
- const adminDB = db.getSiblingDB("admin");
+// Define two databases. We will conduct our tests by creating one collection in each.
+const testDB1 = db.getSiblingDB(jsTestName()), testDB2 = db.getSiblingDB(jsTestName() + "_other");
+const adminDB = db.getSiblingDB("admin");
- assert.commandWorked(testDB1.dropDatabase());
- assert.commandWorked(testDB2.dropDatabase());
+assert.commandWorked(testDB1.dropDatabase());
+assert.commandWorked(testDB2.dropDatabase());
- // Create one collection on each database.
- let [db1Coll, db2Coll] =
- [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
+// Create one collection on each database.
+let [db1Coll, db2Coll] =
+ [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
- // Create a ChangeStreamTest on the 'admin' db. Cluster-wide change streams can only be opened
- // on admin.
- let cst = new ChangeStreamTest(adminDB);
- let aggCursor = cst.startWatchingAllChangesForCluster();
+// Create a ChangeStreamTest on the 'admin' db. Cluster-wide change streams can only be opened
+// on admin.
+let cst = new ChangeStreamTest(adminDB);
+let aggCursor = cst.startWatchingAllChangesForCluster();
- // Generate oplog entries of type insert, update, and delete across both databases.
- for (let coll of[db1Coll, db2Coll]) {
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
- }
+// Generate oplog entries of type insert, update, and delete across both databases.
+for (let coll of [db1Coll, db2Coll]) {
+ assert.writeOK(coll.insert({_id: 1}));
+ assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+ assert.writeOK(coll.remove({_id: 1}));
+}
- // Drop the second database, which should generate a 'drop' entry for the collection followed
- // by a 'dropDatabase' entry.
- assert.commandWorked(testDB2.dropDatabase());
+// Drop the second database, which should generate a 'drop' entry for the collection followed
+// by a 'dropDatabase' entry.
+assert.commandWorked(testDB2.dropDatabase());
- // We should get 6 oplog entries; three ops of type insert, update, delete from each database.
- for (let expectedDB of[testDB1, testDB2]) {
- let change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "update", tojson(change));
- assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "delete", tojson(change));
- assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
- }
- cst.assertDatabaseDrop({cursor: aggCursor, db: testDB2});
+// We should get 6 oplog entries; three ops of type insert, update, delete from each database.
+for (let expectedDB of [testDB1, testDB2]) {
+ let change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "update", tojson(change));
+ assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "delete", tojson(change));
+ assert.eq(change.ns.db, expectedDB.getName(), tojson(change));
+}
+cst.assertDatabaseDrop({cursor: aggCursor, db: testDB2});
- // Test that a cluster-wide change stream can be resumed using a token from a collection which
- // has been dropped.
- db1Coll = assertDropAndRecreateCollection(testDB1, db1Coll.getName());
+// Test that a cluster-wide change stream can be resumed using a token from a collection which
+// has been dropped.
+db1Coll = assertDropAndRecreateCollection(testDB1, db1Coll.getName());
- // Get a valid resume token that the next change stream can use.
- aggCursor = cst.startWatchingAllChangesForCluster();
+// Get a valid resume token that the next change stream can use.
+aggCursor = cst.startWatchingAllChangesForCluster();
- assert.writeOK(db1Coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
+assert.writeOK(db1Coll.insert({_id: 1}, {writeConcern: {w: "majority"}}));
- let change = cst.getOneChange(aggCursor, false);
- const resumeToken = change._id;
-
- // For cluster-wide streams, it is possible to resume at a point before a collection is dropped,
- // even if the "drop" notification has not been received on the original stream yet.
- assertDropCollection(db1Coll, db1Coll.getName());
- // Wait for two-phase drop to complete, so that the UUID no longer exists.
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB1,
- db1Coll.getName());
- });
- assert.commandWorked(adminDB.runCommand({
- aggregate: 1,
- pipeline: [{$changeStream: {resumeAfter: resumeToken, allChangesForCluster: true}}],
- cursor: {}
- }));
-
- // Test that collection drops from any database result in "drop" notifications for the stream.
- [db1Coll, db2Coll] =
- [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
- let _idForTest = 0;
- for (let collToInvalidate of[db1Coll, db2Coll]) {
- // Start watching all changes in the cluster.
- aggCursor = cst.startWatchingAllChangesForCluster();
-
- let testDB = collToInvalidate.getDB();
-
- // Insert into the collections on both databases, and verify the change stream is able to
- // pick them up.
- for (let collToWrite of[db1Coll, db2Coll]) {
- assert.writeOK(collToWrite.insert({_id: _idForTest}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, _idForTest);
- assert.eq(change.ns.db, collToWrite.getDB().getName());
- _idForTest++;
- }
-
- // Renaming the collection should generate a 'rename' notification. Skip this test when
- // running on a sharded collection, since these cannot be renamed.
- if (!FixtureHelpers.isSharded(collToInvalidate)) {
- assertDropAndRecreateCollection(testDB, collToInvalidate.getName());
- const collName = collToInvalidate.getName();
-
- // Start watching all changes in the cluster.
- aggCursor = cst.startWatchingAllChangesForCluster();
- assert.writeOK(collToInvalidate.renameCollection("renamed_coll"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [
- {
- operationType: "rename",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- to: {db: testDB.getName(), coll: "renamed_coll"}
- },
- ]
- });
+let change = cst.getOneChange(aggCursor, false);
+const resumeToken = change._id;
- // Repeat the test, this time using the 'dropTarget' option with an existing target
- // collection.
- collToInvalidate = testDB.getCollection("renamed_coll");
- assertDropAndRecreateCollection(testDB, collName);
- assert.writeOK(testDB[collName].insert({_id: 0}));
- assert.writeOK(collToInvalidate.renameCollection(collName, true /* dropTarget */));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [
- {
- operationType: "insert",
- ns: {db: testDB.getName(), coll: collName},
- documentKey: {_id: 0},
- fullDocument: {_id: 0}
- },
- {
- operationType: "rename",
- ns: {db: testDB.getName(), coll: "renamed_coll"},
- to: {db: testDB.getName(), coll: collName}
- }
- ]
- });
+// For cluster-wide streams, it is possible to resume at a point before a collection is dropped,
+// even if the "drop" notification has not been received on the original stream yet.
+assertDropCollection(db1Coll, db1Coll.getName());
+// Wait for two-phase drop to complete, so that the UUID no longer exists.
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB1,
+ db1Coll.getName());
+});
+assert.commandWorked(adminDB.runCommand({
+ aggregate: 1,
+ pipeline: [{$changeStream: {resumeAfter: resumeToken, allChangesForCluster: true}}],
+ cursor: {}
+}));
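
The same resume can be expressed with the shell's cluster-level helper, assuming a shell version that provides Mongo.prototype.watch (the token is the one captured above):

    const resumed = db.getMongo().watch([], {resumeAfter: resumeToken});
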
- collToInvalidate = testDB[collName];
+// Test that collection drops from any database result in "drop" notifications for the stream.
+[db1Coll, db2Coll] =
+ [testDB1, testDB2].map((testDB) => assertDropAndRecreateCollection(testDB, "test"));
+let _idForTest = 0;
+for (let collToInvalidate of [db1Coll, db2Coll]) {
+ // Start watching all changes in the cluster.
+ aggCursor = cst.startWatchingAllChangesForCluster();
- // Test renaming a collection to a different database. Do not run this in the mongos
- // passthrough suites since we cannot guarantee the primary shard of the target database
- // and renameCollection requires the source and destination to be on the same shard.
- if (!FixtureHelpers.isMongos(testDB)) {
- const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
- // Ensure the target database exists.
- const collOtherDB = assertDropAndRecreateCollection(otherDB, "test");
- assertDropCollection(otherDB, collOtherDB.getName());
- aggCursor = cst.startWatchingAllChangesForCluster();
- assert.commandWorked(testDB.adminCommand({
- renameCollection: collToInvalidate.getFullName(),
- to: collOtherDB.getFullName()
- }));
- // Do not check the 'ns' field since it will contain the namespace of the temp
- // collection created when renaming a collection across databases.
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "rename", tojson(change));
- assert.eq(change.to,
- {db: otherDB.getName(), coll: collOtherDB.getName()},
- tojson(change));
- // Rename across databases also drops the source collection after the collection is
- // copied over.
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
- operationType: "drop",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()}
- }]
- });
- }
+ let testDB = collToInvalidate.getDB();
- // Test the behavior of a change stream watching the target collection of a $out
- // aggregation stage.
- collToInvalidate.aggregate([{$out: "renamed_coll"}]);
- // Do not check the 'ns' field since it will contain the namespace of the temp
- // collection created by the $out stage, before renaming to 'renamed_coll'.
- const rename = cst.getOneChange(aggCursor);
- assert.eq(rename.operationType, "rename", tojson(rename));
- assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
+ // Insert into the collections on both databases, and verify the change stream is able to
+ // pick them up.
+ for (let collToWrite of [db1Coll, db2Coll]) {
+ assert.writeOK(collToWrite.insert({_id: _idForTest}));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.documentKey._id, _idForTest);
+ assert.eq(change.ns.db, collToWrite.getDB().getName());
+ _idForTest++;
+ }
- // The change stream should not be invalidated by the rename(s).
- assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
- assert.writeOK(collToInvalidate.insert({_id: 2}));
- assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
+ // Renaming the collection should generate a 'rename' notification. Skip this test when
+ // running on a sharded collection, since these cannot be renamed.
+ if (!FixtureHelpers.isSharded(collToInvalidate)) {
+ assertDropAndRecreateCollection(testDB, collToInvalidate.getName());
+ const collName = collToInvalidate.getName();
- // Test that renaming a "system" collection to a user collection *does* return a rename
- // notification.
- assert.commandWorked(testDB.runCommand(
- {create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
+ // Start watching all changes in the cluster.
+ aggCursor = cst.startWatchingAllChangesForCluster();
+ assert.writeOK(collToInvalidate.renameCollection("renamed_coll"));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [
+ {
operationType: "rename",
- ns: {db: testDB.getName(), coll: "system.views"},
- to: {db: testDB.getName(), coll: "non_system_collection"}
- }],
- });
-
- // Test that renaming a "system" collection to a different "system" collection does not
- // result in a notification in the change stream.
- aggCursor = cst.startWatchingAllChangesForCluster();
- assert.commandWorked(testDB.runCommand(
- {create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- // Note that the target of the rename must be a valid "system" collection.
- assert.writeOK(testDB.system.views.renameCollection("system.users"));
- // Verify that the change stream filters out the rename above, instead returning the
- // next insert to the test collection.
- assert.writeOK(collToInvalidate.insert({_id: 1}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ to: {db: testDB.getName(), coll: "renamed_coll"}
+ },
+ ]
+ });
- // Test that renaming a user collection to a "system" collection *does* return a rename
- // notification.
- assert.writeOK(collToInvalidate.renameCollection("system.views"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
+ // Repeat the test, this time using the 'dropTarget' option with an existing target
+ // collection.
+ collToInvalidate = testDB.getCollection("renamed_coll");
+ assertDropAndRecreateCollection(testDB, collName);
+ assert.writeOK(testDB[collName].insert({_id: 0}));
+ assert.writeOK(collToInvalidate.renameCollection(collName, true /* dropTarget */));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [
+ {
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: collName},
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0}
+ },
+ {
operationType: "rename",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- to: {db: testDB.getName(), coll: "system.views"}
- }],
- });
+ ns: {db: testDB.getName(), coll: "renamed_coll"},
+ to: {db: testDB.getName(), coll: collName}
+ }
+ ]
+ });
- // Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
- assertDropCollection(testDB, "system.views");
+ collToInvalidate = testDB[collName];
- // Recreate the test collection for the remainder of the test.
- assert.writeOK(collToInvalidate.insert({_id: 0}));
+ // Test renaming a collection to a different database. Do not run this in the mongos
+ // passthrough suites since we cannot guarantee the primary shard of the target database
+ // and renameCollection requires the source and destination to be on the same shard.
+ if (!FixtureHelpers.isMongos(testDB)) {
+ const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
+ // Ensure the target database exists.
+ const collOtherDB = assertDropAndRecreateCollection(otherDB, "test");
+ assertDropCollection(otherDB, collOtherDB.getName());
+ aggCursor = cst.startWatchingAllChangesForCluster();
+ assert.commandWorked(testDB.adminCommand(
+ {renameCollection: collToInvalidate.getFullName(), to: collOtherDB.getFullName()}));
+ // Do not check the 'ns' field since it will contain the namespace of the temp
+ // collection created when renaming a collection across databases.
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "rename", tojson(change));
+ assert.eq(
+ change.to, {db: otherDB.getName(), coll: collOtherDB.getName()}, tojson(change));
+ // Rename across databases also drops the source collection after the collection is
+ // copied over.
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
- operationType: "insert",
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- documentKey: {_id: 0},
- fullDocument: {_id: 0}
+ operationType: "drop",
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()}
}]
});
}
- // Dropping a collection should generate a 'drop' entry.
- assertDropCollection(testDB, collToInvalidate.getName());
- // Insert to the test collection to queue up another change after the drop. This is needed
- // since the number of 'drop' notifications is not deterministic in the sharded passthrough
- // suites.
- assert.writeOK(collToInvalidate.insert({_id: 0}));
- cst.consumeDropUpTo({
+ // Test the behavior of a change stream watching the target collection of a $out
+ // aggregation stage.
+ collToInvalidate.aggregate([{$out: "renamed_coll"}]);
+ // Do not check the 'ns' field since it will contain the namespace of the temp
+ // collection created by the $out stage, before renaming to 'renamed_coll'.
+ const rename = cst.getOneChange(aggCursor);
+ assert.eq(rename.operationType, "rename", tojson(rename));
+ assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
+
+ // The change stream should not be invalidated by the rename(s).
+ assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
+ assert.writeOK(collToInvalidate.insert({_id: 2}));
+ assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
+
+ // Test that renaming a "system" collection to a user collection *does* return a rename
+ // notification.
+ assert.commandWorked(
+ testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
+ assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
+ cst.assertNextChangesEqual({
cursor: aggCursor,
- dropType: "drop",
- expectedNext: {
- documentKey: {_id: 0},
- fullDocument: {_id: 0},
- ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
- operationType: "insert",
- },
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: "system.views"},
+ to: {db: testDB.getName(), coll: "non_system_collection"}
+ }],
});
- // Operations on internal "system" collections should be filtered out and not included in
- // the change stream.
+ // Test that renaming a "system" collection to a different "system" collection does not
+ // result in a notification in the change stream.
aggCursor = cst.startWatchingAllChangesForCluster();
- // Creating a view will generate an insert entry on the "system.views" collection.
assert.commandWorked(
testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
- // Drop the "system.views" collection.
- assertDropCollection(testDB, "system.views");
- // Verify that the change stream does not report the insertion into "system.views", and is
- // not invalidated by dropping the system collection. Instead, it correctly reports the next
- // write to the test collection.
+ // Note that the target of the rename must be a valid "system" collection.
+ assert.writeOK(testDB.system.views.renameCollection("system.users"));
+ // Verify that the change stream filters out the rename above, instead returning the
+ // next insert to the test collection.
assert.writeOK(collToInvalidate.insert({_id: 1}));
change = cst.getOneChange(aggCursor);
assert.eq(change.operationType, "insert", tojson(change));
assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
+
+ // Test that renaming a user collection to a "system" collection *does* return a rename
+ // notification.
+ assert.writeOK(collToInvalidate.renameCollection("system.views"));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ to: {db: testDB.getName(), coll: "system.views"}
+ }],
+ });
+
+ // Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
+ assertDropCollection(testDB, "system.views");
+
+ // Recreate the test collection for the remainder of the test.
+ assert.writeOK(collToInvalidate.insert({_id: 0}));
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0}
+ }]
+ });
}
- cst.cleanUp();
+ // Dropping a collection should generate a 'drop' entry.
+ assertDropCollection(testDB, collToInvalidate.getName());
+ // Insert to the test collection to queue up another change after the drop. This is needed
+ // since the number of 'drop' notifications is not deterministic in the sharded passthrough
+ // suites.
+ assert.writeOK(collToInvalidate.insert({_id: 0}));
+ cst.consumeDropUpTo({
+ cursor: aggCursor,
+ dropType: "drop",
+ expectedNext: {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0},
+ ns: {db: testDB.getName(), coll: collToInvalidate.getName()},
+ operationType: "insert",
+ },
+ });
+
+ // Operations on internal "system" collections should be filtered out and not included in
+ // the change stream.
+ aggCursor = cst.startWatchingAllChangesForCluster();
+ // Creating a view will generate an insert entry on the "system.views" collection.
+ assert.commandWorked(
+ testDB.runCommand({create: "view1", viewOn: collToInvalidate.getName(), pipeline: []}));
+ // Drop the "system.views" collection.
+ assertDropCollection(testDB, "system.views");
+ // Verify that the change stream does not report the insertion into "system.views", and is
+ // not invalidated by dropping the system collection. Instead, it correctly reports the next
+ // write to the test collection.
+ assert.writeOK(collToInvalidate.insert({_id: 1}));
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "insert", tojson(change));
+ assert.eq(change.ns, {db: testDB.getName(), coll: collToInvalidate.getName()});
+}
+
+cst.cleanUp();
}());
diff --git a/jstests/change_streams/whole_cluster_resumability.js b/jstests/change_streams/whole_cluster_resumability.js
index 4a907315fd5..270f6c465db 100644
--- a/jstests/change_streams/whole_cluster_resumability.js
+++ b/jstests/change_streams/whole_cluster_resumability.js
@@ -1,169 +1,167 @@
// Basic tests for resuming a $changeStream that is open against all databases in a cluster.
(function() {
- "use strict";
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Create two databases, with one collection in each.
+const testDBs = [db.getSiblingDB(jsTestName()), db.getSiblingDB(jsTestName() + "_other")];
+let [db1Coll, db2Coll] = testDBs.map((db) => assertDropAndRecreateCollection(db, "test"));
+const adminDB = db.getSiblingDB("admin");
+
+let cst = new ChangeStreamTest(adminDB);
+let resumeCursor = cst.startWatchingAllChangesForCluster();
+
+// Insert a document in the first database and save the resulting change event.
+assert.writeOK(db1Coll.insert({_id: 1}));
+const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+
+// Test resume after the first insert.
+resumeCursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+
+// Write the next document into the second database.
+assert.writeOK(db2Coll.insert({_id: 2}));
+const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+
+// Write the third document into the first database again.
+assert.writeOK(db1Coll.insert({_id: 3}));
+const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+
+// Test resuming after the first insert again.
+resumeCursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Test resume after second insert.
+resumeCursor = cst.startWatchingChanges({
+ pipeline:
+ [{$changeStream: {resumeAfter: secondInsertChangeDoc._id, allChangesForCluster: true}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Rename the collection and obtain a resume token from the 'rename' notification. Skip this
+// test when running on a sharded collection, since these cannot be renamed.
+if (!FixtureHelpers.isSharded(db1Coll)) {
+ assertDropAndRecreateCollection(db1Coll.getDB(), db1Coll.getName());
+ const renameColl = db1Coll.getDB().getCollection("rename_coll");
+ assertDropCollection(renameColl.getDB(), renameColl.getName());
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Create two databases, with one collection in each.
- const testDBs = [db.getSiblingDB(jsTestName()), db.getSiblingDB(jsTestName() + "_other")];
- let [db1Coll, db2Coll] = testDBs.map((db) => assertDropAndRecreateCollection(db, "test"));
- const adminDB = db.getSiblingDB("admin");
-
- let cst = new ChangeStreamTest(adminDB);
- let resumeCursor = cst.startWatchingAllChangesForCluster();
-
-    // Insert a document in the first database and save the resulting change event.
- assert.writeOK(db1Coll.insert({_id: 1}));
- const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
-
- // Test resume after the first insert.
resumeCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
+ pipeline: [{$changeStream: {allChangesForCluster: true}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
});
+ assert.writeOK(db1Coll.renameCollection(renameColl.getName()));
- // Write the next document into the second database.
- assert.writeOK(db2Coll.insert({_id: 2}));
- const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
-
- // Write the third document into the first database again.
- assert.writeOK(db1Coll.insert({_id: 3}));
- const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
-
- // Test resuming after the first insert again.
- resumeCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: firstInsertChangeDoc._id, allChangesForCluster: true}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
+ const renameChanges = cst.assertNextChangesEqual({
+ cursor: resumeCursor,
+ expectedChanges: [
+ {
+ operationType: "rename",
+ ns: {db: db1Coll.getDB().getName(), coll: db1Coll.getName()},
+ to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
+ },
+ ]
});
- assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+ const resumeTokenRename = renameChanges[0]._id;
- // Test resume after second insert.
- resumeCursor = cst.startWatchingChanges({
- pipeline:
- [{$changeStream: {resumeAfter: secondInsertChangeDoc._id, allChangesForCluster: true}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
- // Rename the collection and obtain a resume token from the 'rename' notification. Skip this
- // test when running on a sharded collection, since these cannot be renamed.
- if (!FixtureHelpers.isSharded(db1Coll)) {
- assertDropAndRecreateCollection(db1Coll.getDB(), db1Coll.getName());
- const renameColl = db1Coll.getDB().getCollection("rename_coll");
- assertDropCollection(renameColl.getDB(), renameColl.getName());
-
- resumeCursor = cst.startWatchingChanges({
- collection: 1,
- pipeline: [{$changeStream: {allChangesForCluster: true}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- assert.writeOK(db1Coll.renameCollection(renameColl.getName()));
-
- const renameChanges = cst.assertNextChangesEqual({
- cursor: resumeCursor,
- expectedChanges: [
- {
- operationType: "rename",
- ns: {db: db1Coll.getDB().getName(), coll: db1Coll.getName()},
- to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
- },
- ]
- });
- const resumeTokenRename = renameChanges[0]._id;
-
- // Insert into the renamed collection.
- assert.writeOK(renameColl.insert({_id: "after rename"}));
-
- // Resume from the rename notification using 'resumeAfter' and verify that the change stream
- // returns the next insert.
- let expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges({
- collection: 1,
- pipeline:
- [{$changeStream: {resumeAfter: resumeTokenRename, allChangesForCluster: true}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Resume from the rename notification using 'startAfter' and verify that the change stream
- // returns the next insert.
- expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges({
- collection: 1,
- pipeline:
- [{$changeStream: {startAfter: resumeTokenRename, allChangesForCluster: true}}],
- aggregateOptions: {cursor: {batchSize: 0}}
- });
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Rename back to the original collection for reliability of the collection drops when
- // dropping the database.
- assert.writeOK(renameColl.renameCollection(db1Coll.getName()));
- }
-
- // Dropping a database should generate a 'drop' notification for the collection followed by a
- // 'dropDatabase' notification.
- resumeCursor = cst.startWatchingAllChangesForCluster();
- assert.commandWorked(testDBs[0].dropDatabase());
- const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDBs[0]});
- const resumeTokenDbDrop = dropDbChanges[dropDbChanges.length - 1]._id;
-
- // Recreate the collection and insert a document.
- assert.writeOK(db1Coll.insert({_id: "after recreate"}));
+ // Insert into the renamed collection.
+ assert.writeOK(renameColl.insert({_id: "after rename"}));
+ // Resume from the rename notification using 'resumeAfter' and verify that the change stream
+ // returns the next insert.
let expectedInsert = {
operationType: "insert",
- ns: {db: testDBs[0].getName(), coll: db1Coll.getName()},
- fullDocument: {_id: "after recreate"},
- documentKey: {_id: "after recreate"}
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
};
-
- // Resume from the database drop using 'resumeAfter', and verify the change stream picks up
- // the insert.
resumeCursor = cst.startWatchingChanges({
collection: 1,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenRename, allChangesForCluster: true}}],
aggregateOptions: {cursor: {batchSize: 0}}
});
- cst.consumeDropUpTo({
- cursor: resumeCursor,
- dropType: "dropDatabase",
- expectedNext: expectedInsert,
- });
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
- // Resume from the database drop using 'startAfter', and verify the change stream picks up the
- // insert.
+ // Resume from the rename notification using 'startAfter' and verify that the change stream
+ // returns the next insert.
+ expectedInsert = {
+ operationType: "insert",
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ };
resumeCursor = cst.startWatchingChanges({
collection: 1,
- pipeline: [{$changeStream: {startAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ pipeline: [{$changeStream: {startAfter: resumeTokenRename, allChangesForCluster: true}}],
aggregateOptions: {cursor: {batchSize: 0}}
});
- cst.consumeDropUpTo({
- cursor: resumeCursor,
- dropType: "dropDatabase",
- expectedNext: expectedInsert,
- });
-
- cst.cleanUp();
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
+
+    // Rename back to the original collection so that the collection drops are predictable when
+    // dropping the database.
+ assert.writeOK(renameColl.renameCollection(db1Coll.getName()));
+}
+
+// Dropping a database should generate a 'drop' notification for the collection followed by a
+// 'dropDatabase' notification.
+resumeCursor = cst.startWatchingAllChangesForCluster();
+assert.commandWorked(testDBs[0].dropDatabase());
+const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDBs[0]});
+const resumeTokenDbDrop = dropDbChanges[dropDbChanges.length - 1]._id;
+
+// Recreate the collection and insert a document.
+assert.writeOK(db1Coll.insert({_id: "after recreate"}));
+
+let expectedInsert = {
+ operationType: "insert",
+ ns: {db: testDBs[0].getName(), coll: db1Coll.getName()},
+ fullDocument: {_id: "after recreate"},
+ documentKey: {_id: "after recreate"}
+};
+
+// Resume from the database drop using 'resumeAfter', and verify the change stream picks up
+// the insert.
+resumeCursor = cst.startWatchingChanges({
+ collection: 1,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: resumeCursor,
+ dropType: "dropDatabase",
+ expectedNext: expectedInsert,
+});
+
+// Resume from the database drop using 'startAfter', and verify the change stream picks up the
+// insert.
+resumeCursor = cst.startWatchingChanges({
+ collection: 1,
+ pipeline: [{$changeStream: {startAfter: resumeTokenDbDrop, allChangesForCluster: true}}],
+ aggregateOptions: {cursor: {batchSize: 0}}
+});
+cst.consumeDropUpTo({
+ cursor: resumeCursor,
+ dropType: "dropDatabase",
+ expectedNext: expectedInsert,
+});
+
+cst.cleanUp();
})();
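
The hunk above drives the resume machinery through the raw aggregate command with {allChangesForCluster: true}: both 'resumeAfter' and 'startAfter' accept the token of a 'rename' notification, and the resumed stream returns the next write. A minimal standalone sketch of that pattern, assuming a 4.2-era mongo shell connected to a replica set; the shell helper Mongo.watch() opens the same cluster-wide stream, and the collection and variable names here are illustrative:

    const cursor = db.getMongo().watch();  // cluster-wide stream
    assert.writeOK(db.src.insert({_id: 1}));
    assert.writeOK(db.src.renameCollection("dst"));
    assert.soon(() => cursor.hasNext());
    assert.eq(cursor.next().operationType, "insert");
    assert.soon(() => cursor.hasNext());
    const renameEvent = cursor.next();
    assert.eq(renameEvent.operationType, "rename");
    // Either resume mode accepts the rename token; the resumed stream picks up
    // at the next write after the rename.
    assert.writeOK(db.dst.insert({_id: "after rename"}));
    const resumed = db.getMongo().watch([], {startAfter: renameEvent._id});
    assert.soon(() => resumed.hasNext());
    assert.docEq(resumed.next().documentKey, {_id: "after rename"});
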
diff --git a/jstests/change_streams/whole_db.js b/jstests/change_streams/whole_db.js
index e05fe809636..aaa6fd0a29f 100644
--- a/jstests/change_streams/whole_db.js
+++ b/jstests/change_streams/whole_db.js
@@ -3,84 +3,84 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
+"use strict";
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
- // assert[Valid|Invalid]ChangeStreamNss.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and
+ // assert[Valid|Invalid]ChangeStreamNss.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
- db = db.getSiblingDB(jsTestName());
- assert.commandWorked(db.dropDatabase());
+db = db.getSiblingDB(jsTestName());
+assert.commandWorked(db.dropDatabase());
- // Test that a single-database change stream cannot be opened on "admin", "config", or "local".
- assertInvalidChangeStreamNss("admin", 1);
- assertInvalidChangeStreamNss("config", 1);
- if (!FixtureHelpers.isMongos(db)) {
- assertInvalidChangeStreamNss("local", 1);
- }
+// Test that a single-database change stream cannot be opened on "admin", "config", or "local".
+assertInvalidChangeStreamNss("admin", 1);
+assertInvalidChangeStreamNss("config", 1);
+if (!FixtureHelpers.isMongos(db)) {
+ assertInvalidChangeStreamNss("local", 1);
+}
- let cst = new ChangeStreamTest(db);
- let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+let cst = new ChangeStreamTest(db);
+let cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- // Test that if there are no changes, we return an empty batch.
- assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
+// Test that if there are no changes, we return an empty batch.
+assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor));
- // Test that the change stream returns an inserted doc.
- assert.writeOK(db.t1.insert({_id: 0, a: 1}));
- let expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: "t1"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns an inserted doc.
+assert.writeOK(db.t1.insert({_id: 0, a: 1}));
+let expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: "t1"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns another inserted doc in a different collection but still
- // in the target db.
- assert.writeOK(db.t2.insert({_id: 0, a: 2}));
- expected = {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 2},
- ns: {db: db.getName(), coll: "t2"},
- operationType: "insert",
- };
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
+// Test that the change stream returns another inserted doc in a different collection but still
+// in the target db.
+assert.writeOK(db.t2.insert({_id: 0, a: 2}));
+expected = {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 2},
+ ns: {db: db.getName(), coll: "t2"},
+ operationType: "insert",
+};
+cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expected]});
- // Test that the change stream returns an inserted doc on a user-created collection whose name
- // includes "system" but is not considered an internal collection.
- const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
- validSystemColls.forEach(collName => {
- cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- const coll = db.getCollection(collName);
- assert.writeOK(coll.insert({_id: 0, a: 1}));
- expected = [
- {
- documentKey: {_id: 0},
- fullDocument: {_id: 0, a: 1},
- ns: {db: db.getName(), coll: collName},
- operationType: "insert",
- },
- ];
- cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
+// Test that the change stream returns an inserted doc on a user-created collection whose name
+// includes "system" but is not considered an internal collection.
+const validSystemColls = ["system", "systems.views", "ssystem.views", "test.system"];
+validSystemColls.forEach(collName => {
+ cursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+ const coll = db.getCollection(collName);
+ assert.writeOK(coll.insert({_id: 0, a: 1}));
+ expected = [
+ {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0, a: 1},
+ ns: {db: db.getName(), coll: collName},
+ operationType: "insert",
+ },
+ ];
+ cst.assertNextChangesEqual({cursor: cursor, expectedChanges: expected});
- // Drop the collection and verify that the change stream picks up the "drop" notification.
- assertDropCollection(db, collName);
-        // Insert into the test collection to queue up another change after the drop. This is needed
-        // since the number of 'drop' notifications is not deterministic in the sharded passthrough
-        // suites.
- assert.writeOK(coll.insert({_id: 0}));
- cst.consumeDropUpTo({
- cursor: cursor,
- dropType: "drop",
- expectedNext: {
- documentKey: {_id: 0},
- fullDocument: {_id: 0},
- ns: {db: db.getName(), coll: collName},
- operationType: "insert",
- },
- });
+ // Drop the collection and verify that the change stream picks up the "drop" notification.
+ assertDropCollection(db, collName);
+    // Insert into the test collection to queue up another change after the drop. This is needed
+    // since the number of 'drop' notifications is not deterministic in the sharded passthrough
+    // suites.
+ assert.writeOK(coll.insert({_id: 0}));
+ cst.consumeDropUpTo({
+ cursor: cursor,
+ dropType: "drop",
+ expectedNext: {
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0},
+ ns: {db: db.getName(), coll: collName},
+ operationType: "insert",
+ },
});
+});
- cst.cleanUp();
+cst.cleanUp();
}());
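
The whole_db.js test opens its stream with the command form {aggregate: 1, pipeline: [{$changeStream: {}}], cursor: {}}. The shell helper db.watch() issues the equivalent command; a minimal sketch under the same assumptions (replica-set mongo shell; the database and collection names are illustrative):

    const watchedDB = db.getSiblingDB("whole_db_example");
    const cursor = watchedDB.watch();  // every user collection in this db
    assert.writeOK(watchedDB.t1.insert({_id: 0, a: 1}));
    assert.writeOK(watchedDB.t2.insert({_id: 0, a: 2}));
    assert.soon(() => cursor.hasNext());
    assert.eq(cursor.next().ns, {db: watchedDB.getName(), coll: "t1"});
    assert.soon(() => cursor.hasNext());
    assert.eq(cursor.next().ns, {db: watchedDB.getName(), coll: "t2"});
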
diff --git a/jstests/change_streams/whole_db_metadata_notifications.js b/jstests/change_streams/whole_db_metadata_notifications.js
index 54d4b8cc6e2..7b659ff4e12 100644
--- a/jstests/change_streams/whole_db_metadata_notifications.js
+++ b/jstests/change_streams/whole_db_metadata_notifications.js
@@ -3,256 +3,250 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
-
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- const testDB = db.getSiblingDB(jsTestName());
- testDB.dropDatabase();
- let cst = new ChangeStreamTest(testDB);
-
-    // Write a document to the collection and test that the change stream returns it
-    // and that a getMore command closes the cursor afterwards.
- const collName = "test";
- let coll = assertDropAndRecreateCollection(testDB, collName);
-
- let aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
-
- // Create oplog entries of type insert, update, and delete.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
- assert.writeOK(coll.remove({_id: 1}));
-
- // Drop and recreate the collection.
- const collAgg = assertDropAndRecreateCollection(testDB, collName);
-
- // We should get 4 oplog entries of type insert, update, delete, and drop.
- let change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "update", tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "delete", tojson(change));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "drop", tojson(change));
-
- // Get a valid resume token that the next change stream can use.
- assert.writeOK(collAgg.insert({_id: 1}));
-
- change = cst.getOneChange(aggCursor, false);
- const resumeToken = change._id;
-
- // For whole-db streams, it is possible to resume at a point before a collection is dropped.
- assertDropCollection(testDB, collAgg.getName());
- // Wait for two-phase drop to complete, so that the UUID no longer exists.
- assert.soon(function() {
- return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB,
- collAgg.getName());
- });
- assert.commandWorked(testDB.runCommand(
- {aggregate: 1, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}}));
-
- // Test that invalidation entries for other databases are filtered out.
- const otherDB = testDB.getSiblingDB(jsTestName() + "other");
- const otherDBColl = otherDB[collName + "_other"];
- assert.writeOK(otherDBColl.insert({_id: 0}));
-
- // Create collection on the database being watched.
- coll = assertDropAndRecreateCollection(testDB, collName);
-
- // Create the $changeStream. We set 'doNotModifyInPassthroughs' so that this test will not be
- // upconverted to a cluster-wide stream, which would return an entry for the dropped collection
- // in the other database.
- aggCursor = cst.startWatchingChanges(
- {pipeline: [{$changeStream: {}}], collection: 1, doNotModifyInPassthroughs: true});
-
-    // Drop the collection on the other database; this should *not* invalidate the change stream.
- assertDropCollection(otherDB, otherDBColl.getName());
-
- // Insert into the collection in the watched database, and verify the change stream is able to
- // pick it up.
- assert.writeOK(coll.insert({_id: 1}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.documentKey._id, 1);
-
- // Test that renaming a collection generates a 'rename' entry for the 'from' collection. MongoDB
- // does not allow renaming of sharded collections, so only perform this test if the collection
- // is not sharded.
- if (!FixtureHelpers.isSharded(coll)) {
- assertDropAndRecreateCollection(testDB, coll.getName());
- assertDropCollection(testDB, "renamed_coll");
- aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assert.writeOK(coll.renameCollection("renamed_coll"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
- operationType: "rename",
- ns: {db: testDB.getName(), coll: coll.getName()},
- to: {db: testDB.getName(), coll: "renamed_coll"}
- }]
- });
-
- // Repeat the test, this time using the 'dropTarget' option with an existing target
- // collection.
- coll = testDB["renamed_coll"];
- assertCreateCollection(testDB, collName);
- assert.writeOK(testDB[collName].insert({_id: 0}));
- assert.writeOK(coll.renameCollection(collName, true /* dropTarget */));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [
- {
- operationType: "insert",
- ns: {db: testDB.getName(), coll: collName},
- documentKey: {_id: 0},
- fullDocument: {_id: 0}
- },
- {
- operationType: "rename",
- ns: {db: testDB.getName(), coll: "renamed_coll"},
- to: {db: testDB.getName(), coll: collName}
- }
- ]
- });
-
- coll = testDB[collName];
- // Test renaming a collection from the database being watched to a different database. Do
- // not run this in the mongos passthrough suites since we cannot guarantee the primary shard
- // of the target database, and renameCollection requires the source and destination to be on
- // the same shard.
- if (!FixtureHelpers.isMongos(testDB)) {
- const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
- // Create target collection to ensure the database exists.
- const collOtherDB = assertCreateCollection(otherDB, "test");
- assertDropCollection(otherDB, "test");
- assert.commandWorked(testDB.adminCommand(
- {renameCollection: coll.getFullName(), to: collOtherDB.getFullName()}));
-            // A rename across databases drops the source collection after the collection is copied
-            // over.
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges:
- [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}]
- });
-
- // Test renaming a collection from a different database to the database being watched.
- assert.commandWorked(testDB.adminCommand(
- {renameCollection: collOtherDB.getFullName(), to: coll.getFullName()}));
- // Do not check the 'ns' field since it will contain the namespace of the temp
- // collection created when renaming a collection across databases.
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "rename");
- assert.eq(change.to, {db: testDB.getName(), coll: coll.getName()});
- }
-
- // Test the behavior of a change stream watching the target collection of a $out aggregation
- // stage.
- coll.aggregate([{$out: "renamed_coll"}]);
- // Note that $out will first create a temp collection, and then rename the temp collection
- // to the target. Do not explicitly check the 'ns' field.
- const rename = cst.getOneChange(aggCursor);
- assert.eq(rename.operationType, "rename", tojson(rename));
- assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
-
- // The change stream should not be invalidated by the rename(s).
- assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
- assert.writeOK(coll.insert({_id: 2}));
- assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
-
- // Drop the new collection to avoid an additional 'drop' notification when the database is
- // dropped.
- assertDropCollection(testDB, "renamed_coll");
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges:
- [{operationType: "drop", ns: {db: testDB.getName(), coll: "renamed_coll"}}],
- });
- }
-
- // Dropping a collection should return a 'drop' entry.
- assertDropCollection(testDB, coll.getName());
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges:
- [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}],
- });
-
- // Operations on internal "system" collections should be filtered out and not included in the
- // change stream.
- aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- // Creating a view will generate an insert entry on the "system.views" collection.
- assert.commandWorked(
- testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
- // Drop the "system.views" collection.
- assertDropCollection(testDB, "system.views");
- // Verify that the change stream does not report the insertion into "system.views", and is
- // not invalidated by dropping the system collection. Instead, it correctly reports the next
- // write to the test collection.
- assert.writeOK(coll.insert({_id: 0}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
-
- // Test that renaming a "system" collection *does* return a notification if the target of
- // the rename is a non-system collection.
- assert.commandWorked(
- testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
- assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
- cst.assertNextChangesEqual({
- cursor: aggCursor,
- expectedChanges: [{
- operationType: "rename",
- ns: {db: testDB.getName(), coll: "system.views"},
- to: {db: testDB.getName(), coll: "non_system_collection"}
- }],
- });
-
- // Test that renaming a "system" collection to a different "system" collection does not
- // result in a notification in the change stream.
+"use strict";
+
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectionTest'.
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+const testDB = db.getSiblingDB(jsTestName());
+testDB.dropDatabase();
+let cst = new ChangeStreamTest(testDB);
+
+// Write a document to the collection and test that the change stream returns it
+// and that a getMore command closes the cursor afterwards.
+const collName = "test";
+let coll = assertDropAndRecreateCollection(testDB, collName);
+
+let aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Create oplog entries of type insert, update, and delete.
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(coll.update({_id: 1}, {$set: {a: 1}}));
+assert.writeOK(coll.remove({_id: 1}));
+
+// Drop and recreate the collection.
+const collAgg = assertDropAndRecreateCollection(testDB, collName);
+
+// We should get 4 oplog entries of type insert, update, delete, and drop.
+let change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "update", tojson(change));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "delete", tojson(change));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "drop", tojson(change));
+
+// Get a valid resume token that the next change stream can use.
+assert.writeOK(collAgg.insert({_id: 1}));
+
+change = cst.getOneChange(aggCursor, false);
+const resumeToken = change._id;
+
+// For whole-db streams, it is possible to resume at a point before a collection is dropped.
+assertDropCollection(testDB, collAgg.getName());
+// Wait for two-phase drop to complete, so that the UUID no longer exists.
+assert.soon(function() {
+ return !TwoPhaseDropCollectionTest.collectionIsPendingDropInDatabase(testDB, collAgg.getName());
+});
+assert.commandWorked(testDB.runCommand(
+ {aggregate: 1, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}}));
+
+// Test that invalidation entries for other databases are filtered out.
+const otherDB = testDB.getSiblingDB(jsTestName() + "other");
+const otherDBColl = otherDB[collName + "_other"];
+assert.writeOK(otherDBColl.insert({_id: 0}));
+
+// Create collection on the database being watched.
+coll = assertDropAndRecreateCollection(testDB, collName);
+
+// Create the $changeStream. We set 'doNotModifyInPassthroughs' so that this test will not be
+// upconverted to a cluster-wide stream, which would return an entry for the dropped collection
+// in the other database.
+aggCursor = cst.startWatchingChanges(
+ {pipeline: [{$changeStream: {}}], collection: 1, doNotModifyInPassthroughs: true});
+
+// Drop the collection on the other database; this should *not* invalidate the change stream.
+assertDropCollection(otherDB, otherDBColl.getName());
+
+// Insert into the collection in the watched database, and verify the change stream is able to
+// pick it up.
+assert.writeOK(coll.insert({_id: 1}));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.documentKey._id, 1);
+
+// Test that renaming a collection generates a 'rename' entry for the 'from' collection. MongoDB
+// does not allow renaming of sharded collections, so only perform this test if the collection
+// is not sharded.
+if (!FixtureHelpers.isSharded(coll)) {
+ assertDropAndRecreateCollection(testDB, coll.getName());
+ assertDropCollection(testDB, "renamed_coll");
aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assert.commandWorked(
- testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
- // Note that the target of the rename must be a valid "system" collection.
- assert.writeOK(testDB.system.views.renameCollection("system.users"));
- // Verify that the change stream filters out the rename above, instead returning the next insert
- // to the test collection.
- assert.writeOK(coll.insert({_id: 1}));
- change = cst.getOneChange(aggCursor);
- assert.eq(change.operationType, "insert", tojson(change));
- assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
-
- // Test that renaming a user collection to a "system" collection *is* returned in the change
- // stream.
- assert.writeOK(coll.renameCollection("system.views"));
+ assert.writeOK(coll.renameCollection("renamed_coll"));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [{
operationType: "rename",
ns: {db: testDB.getName(), coll: coll.getName()},
- to: {db: testDB.getName(), coll: "system.views"}
- }],
+ to: {db: testDB.getName(), coll: "renamed_coll"}
+ }]
});
- // Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
- assertDropCollection(testDB, "system.views");
- assertDropCollection(testDB, "non_system_collection");
+ // Repeat the test, this time using the 'dropTarget' option with an existing target
+ // collection.
+ coll = testDB["renamed_coll"];
+ assertCreateCollection(testDB, collName);
+ assert.writeOK(testDB[collName].insert({_id: 0}));
+ assert.writeOK(coll.renameCollection(collName, true /* dropTarget */));
cst.assertNextChangesEqual({
cursor: aggCursor,
expectedChanges: [
- {operationType: "drop", ns: {db: testDB.getName(), coll: "non_system_collection"}},
+ {
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: collName},
+ documentKey: {_id: 0},
+ fullDocument: {_id: 0}
+ },
+ {
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: "renamed_coll"},
+ to: {db: testDB.getName(), coll: collName}
+ }
]
});
- // Dropping the database should generate a 'dropDatabase' notification followed by an
- // 'invalidate'.
- assert.commandWorked(testDB.dropDatabase());
- cst.assertDatabaseDrop({cursor: aggCursor, db: testDB});
- cst.assertNextChangesEqual(
- {cursor: aggCursor, expectedChanges: [{operationType: "invalidate"}]});
+ coll = testDB[collName];
+ // Test renaming a collection from the database being watched to a different database. Do
+ // not run this in the mongos passthrough suites since we cannot guarantee the primary shard
+ // of the target database, and renameCollection requires the source and destination to be on
+ // the same shard.
+ if (!FixtureHelpers.isMongos(testDB)) {
+ const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target");
+ // Create target collection to ensure the database exists.
+ const collOtherDB = assertCreateCollection(otherDB, "test");
+ assertDropCollection(otherDB, "test");
+ assert.commandWorked(testDB.adminCommand(
+ {renameCollection: coll.getFullName(), to: collOtherDB.getFullName()}));
+        // A rename across databases drops the source collection after the collection is copied
+        // over.
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges:
+ [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}]
+ });
+
+ // Test renaming a collection from a different database to the database being watched.
+ assert.commandWorked(testDB.adminCommand(
+ {renameCollection: collOtherDB.getFullName(), to: coll.getFullName()}));
+ // Do not check the 'ns' field since it will contain the namespace of the temp
+ // collection created when renaming a collection across databases.
+ change = cst.getOneChange(aggCursor);
+ assert.eq(change.operationType, "rename");
+ assert.eq(change.to, {db: testDB.getName(), coll: coll.getName()});
+ }
- cst.cleanUp();
+ // Test the behavior of a change stream watching the target collection of a $out aggregation
+ // stage.
+ coll.aggregate([{$out: "renamed_coll"}]);
+ // Note that $out will first create a temp collection, and then rename the temp collection
+ // to the target. Do not explicitly check the 'ns' field.
+ const rename = cst.getOneChange(aggCursor);
+ assert.eq(rename.operationType, "rename", tojson(rename));
+ assert.eq(rename.to, {db: testDB.getName(), coll: "renamed_coll"}, tojson(rename));
+
+ // The change stream should not be invalidated by the rename(s).
+ assert.eq(0, cst.getNextBatch(aggCursor).nextBatch.length);
+ assert.writeOK(coll.insert({_id: 2}));
+ assert.eq(cst.getOneChange(aggCursor).operationType, "insert");
+
+ // Drop the new collection to avoid an additional 'drop' notification when the database is
+ // dropped.
+ assertDropCollection(testDB, "renamed_coll");
+ cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges:
+ [{operationType: "drop", ns: {db: testDB.getName(), coll: "renamed_coll"}}],
+ });
+}
+
+// Dropping a collection should return a 'drop' entry.
+assertDropCollection(testDB, coll.getName());
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{operationType: "drop", ns: {db: testDB.getName(), coll: coll.getName()}}],
+});
+
+// Operations on internal "system" collections should be filtered out and not included in the
+// change stream.
+aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+// Creating a view will generate an insert entry on the "system.views" collection.
+assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
+// Drop the "system.views" collection.
+assertDropCollection(testDB, "system.views");
+// Verify that the change stream does not report the insertion into "system.views", and is
+// not invalidated by dropping the system collection. Instead, it correctly reports the next
+// write to the test collection.
+assert.writeOK(coll.insert({_id: 0}));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test that renaming a "system" collection *does* return a notification if the target of
+// the rename is a non-system collection.
+assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
+assert.writeOK(testDB.system.views.renameCollection("non_system_collection"));
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: "system.views"},
+ to: {db: testDB.getName(), coll: "non_system_collection"}
+ }],
+});
+
+// Test that renaming a "system" collection to a different "system" collection does not
+// result in a notification in the change stream.
+aggCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+assert.commandWorked(testDB.runCommand({create: "view1", viewOn: coll.getName(), pipeline: []}));
+// Note that the target of the rename must be a valid "system" collection.
+assert.writeOK(testDB.system.views.renameCollection("system.users"));
+// Verify that the change stream filters out the rename above, instead returning the next insert
+// to the test collection.
+assert.writeOK(coll.insert({_id: 1}));
+change = cst.getOneChange(aggCursor);
+assert.eq(change.operationType, "insert", tojson(change));
+assert.eq(change.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test that renaming a user collection to a "system" collection *is* returned in the change
+// stream.
+assert.writeOK(coll.renameCollection("system.views"));
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [{
+ operationType: "rename",
+ ns: {db: testDB.getName(), coll: coll.getName()},
+ to: {db: testDB.getName(), coll: "system.views"}
+ }],
+});
+
+// Drop the "system.views" collection to avoid view catalog errors in subsequent tests.
+assertDropCollection(testDB, "system.views");
+assertDropCollection(testDB, "non_system_collection");
+cst.assertNextChangesEqual({
+ cursor: aggCursor,
+ expectedChanges: [
+ {operationType: "drop", ns: {db: testDB.getName(), coll: "non_system_collection"}},
+ ]
+});
+
+// Dropping the database should generate a 'dropDatabase' notification followed by an
+// 'invalidate'.
+assert.commandWorked(testDB.dropDatabase());
+cst.assertDatabaseDrop({cursor: aggCursor, db: testDB});
+cst.assertNextChangesEqual({cursor: aggCursor, expectedChanges: [{operationType: "invalidate"}]});
+
+cst.cleanUp();
}());
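
A takeaway from the metadata test above is that collection-level catalog changes ('drop', 'rename') do not invalidate a whole-db stream, so their tokens remain resumable; only dropping the watched database invalidates it. A minimal sketch of resuming across a collection drop, under the same assumptions (replica-set mongo shell; names are illustrative):

    const mdDB = db.getSiblingDB("metadata_example");
    const cursor = mdDB.watch();
    assert.writeOK(mdDB.c.insert({_id: 1}));
    assert(mdDB.c.drop());
    assert.soon(() => cursor.hasNext());
    assert.eq(cursor.next().operationType, "insert");
    assert.soon(() => cursor.hasNext());
    const dropEvent = cursor.next();
    assert.eq(dropEvent.operationType, "drop");
    // The 'drop' does not invalidate a whole-db stream, so its token can seed
    // a new cursor.
    const resumed = mdDB.watch([], {resumeAfter: dropEvent._id});
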
diff --git a/jstests/change_streams/whole_db_resumability.js b/jstests/change_streams/whole_db_resumability.js
index b71495355fa..697f72ddcf9 100644
--- a/jstests/change_streams/whole_db_resumability.js
+++ b/jstests/change_streams/whole_db_resumability.js
@@ -3,199 +3,199 @@
// invalidated by a database drop.
// @tags: [do_not_run_in_whole_cluster_passthrough]
(function() {
- "use strict";
-
- load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
- load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
- load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
-
- // Drop and recreate the collections to be used in this set of tests.
- const testDB = db.getSiblingDB(jsTestName());
- let coll = assertDropAndRecreateCollection(testDB, "resume_coll");
- const otherColl = assertDropAndRecreateCollection(testDB, "resume_coll_other");
-
- let cst = new ChangeStreamTest(testDB);
- let resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
-
-    // Insert a single document into each collection and save the resume token from the first insert.
- assert.writeOK(coll.insert({_id: 1}));
- assert.writeOK(otherColl.insert({_id: 2}));
- const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
- assert.eq(firstInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
-
-    // Test that resuming the change stream after the first insert picks up the insert on the
-    // second collection.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
+"use strict";
+
+load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
+load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest.
+load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
+
+// Drop and recreate the collections to be used in this set of tests.
+const testDB = db.getSiblingDB(jsTestName());
+let coll = assertDropAndRecreateCollection(testDB, "resume_coll");
+const otherColl = assertDropAndRecreateCollection(testDB, "resume_coll_other");
+
+let cst = new ChangeStreamTest(testDB);
+let resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+
+// Insert a single document into each collection and save the resume token from the first insert.
+assert.writeOK(coll.insert({_id: 1}));
+assert.writeOK(otherColl.insert({_id: 2}));
+const firstInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(firstInsertChangeDoc.fullDocument, {_id: 1});
+assert.eq(firstInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test that resuming the change stream after the first insert picks up the insert on the
+// second collection.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+
+const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
+assert.eq(secondInsertChangeDoc.ns, {db: testDB.getName(), coll: otherColl.getName()});
+
+// Insert a third document to the first collection and test that the change stream picks it up.
+assert.writeOK(coll.insert({_id: 3}));
+const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
+assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
+assert.eq(thirdInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
+
+// Test resuming after the first insert again.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Test resume after second insert.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+
+// Rename the collection and attempt to resume from the 'rename' notification. Skip this
+// test when running on a sharded collection, since sharded collections cannot be renamed.
+if (!FixtureHelpers.isSharded(coll)) {
+ assertDropAndRecreateCollection(coll.getDB(), coll.getName());
+ const renameColl = coll.getDB().getCollection("rename_coll");
+ assertDropCollection(renameColl.getDB(), renameColl.getName());
+
+ resumeCursor = cst.startWatchingChanges({collection: 1, pipeline: [{$changeStream: {}}]});
+ assert.writeOK(coll.renameCollection(renameColl.getName()));
+
+ const renameChanges = cst.assertNextChangesEqual({
+ cursor: resumeCursor,
+ expectedChanges: [
+ {
+ operationType: "rename",
+ ns: {db: coll.getDB().getName(), coll: coll.getName()},
+ to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
+ },
+ ]
});
+ const resumeTokenRename = renameChanges[0]._id;
- const secondInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(secondInsertChangeDoc.fullDocument, {_id: 2});
- assert.eq(secondInsertChangeDoc.ns, {db: testDB.getName(), coll: otherColl.getName()});
+ // Insert into the renamed collection.
+ assert.writeOK(renameColl.insert({_id: "after rename"}));
- // Insert a third document to the first collection and test that the change stream picks it up.
- assert.writeOK(coll.insert({_id: 3}));
- const thirdInsertChangeDoc = cst.getOneChange(resumeCursor);
- assert.docEq(thirdInsertChangeDoc.fullDocument, {_id: 3});
- assert.eq(thirdInsertChangeDoc.ns, {db: testDB.getName(), coll: coll.getName()});
+ // Resume from the rename notification using 'resumeAfter' and verify that the change stream
+ // returns the next insert.
+ let expectedInsert = {
+ operationType: "insert",
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ };
+ resumeCursor = cst.startWatchingChanges(
+ {collection: 1, pipeline: [{$changeStream: {resumeAfter: resumeTokenRename}}]});
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
- // Test resuming after the first insert again.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: firstInsertChangeDoc._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), secondInsertChangeDoc);
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
+ // Resume from the rename notification using 'startAfter' and verify that the change stream
+ // returns the next insert.
+ expectedInsert = {
+ operationType: "insert",
+ ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
+ fullDocument: {_id: "after rename"},
+ documentKey: {_id: "after rename"}
+ };
+ resumeCursor = cst.startWatchingChanges(
+ {collection: 1, pipeline: [{$changeStream: {startAfter: resumeTokenRename}}]});
+ cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
- // Test resume after second insert.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: secondInsertChangeDoc._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- assert.docEq(cst.getOneChange(resumeCursor), thirdInsertChangeDoc);
-
-    // Rename the collection and attempt to resume from the 'rename' notification. Skip this
-    // test when running on a sharded collection, since sharded collections cannot be renamed.
- if (!FixtureHelpers.isSharded(coll)) {
- assertDropAndRecreateCollection(coll.getDB(), coll.getName());
- const renameColl = coll.getDB().getCollection("rename_coll");
- assertDropCollection(renameColl.getDB(), renameColl.getName());
-
- resumeCursor = cst.startWatchingChanges({collection: 1, pipeline: [{$changeStream: {}}]});
- assert.writeOK(coll.renameCollection(renameColl.getName()));
-
- const renameChanges = cst.assertNextChangesEqual({
- cursor: resumeCursor,
- expectedChanges: [
- {
- operationType: "rename",
- ns: {db: coll.getDB().getName(), coll: coll.getName()},
- to: {db: renameColl.getDB().getName(), coll: renameColl.getName()}
- },
- ]
- });
- const resumeTokenRename = renameChanges[0]._id;
-
- // Insert into the renamed collection.
- assert.writeOK(renameColl.insert({_id: "after rename"}));
-
- // Resume from the rename notification using 'resumeAfter' and verify that the change stream
- // returns the next insert.
- let expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges(
- {collection: 1, pipeline: [{$changeStream: {resumeAfter: resumeTokenRename}}]});
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
- // Resume from the rename notification using 'startAfter' and verify that the change stream
- // returns the next insert.
- expectedInsert = {
- operationType: "insert",
- ns: {db: renameColl.getDB().getName(), coll: renameColl.getName()},
- fullDocument: {_id: "after rename"},
- documentKey: {_id: "after rename"}
- };
- resumeCursor = cst.startWatchingChanges(
- {collection: 1, pipeline: [{$changeStream: {startAfter: resumeTokenRename}}]});
- cst.assertNextChangesEqual({cursor: resumeCursor, expectedChanges: expectedInsert});
-
-        // Rename back to the original collection so that the collection drops are predictable when
- // dropping the database.
- assert.writeOK(renameColl.renameCollection(coll.getName()));
- }
-
-    // Explicitly drop one collection so that the order of notifications from the
-    // dropDatabase command is deterministic.
- resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
- assertDropCollection(testDB, otherColl.getName());
- const firstCollDrop = cst.getOneChange(resumeCursor);
- assert.eq(firstCollDrop.operationType, "drop", tojson(firstCollDrop));
- assert.eq(firstCollDrop.ns, {db: testDB.getName(), coll: otherColl.getName()});
-
- // Dropping a database should generate a 'drop' notification for each collection, a
- // 'dropDatabase' notification, and finally an 'invalidate'.
- assert.commandWorked(testDB.dropDatabase());
- const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
- const secondCollDrop = dropDbChanges[0];
-    // For sharded passthrough suites, we know that the last entry will be a 'dropDatabase';
-    // however, there may be multiple collection drops in 'dropDbChanges', depending on the
-    // number of shards involved.
- const resumeTokenDropDb = dropDbChanges[dropDbChanges.length - 1]._id;
- const resumeTokenInvalidate =
- cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]})[0]
- ._id;
-
- // Test resuming from the first collection drop and the second collection drop as a result of
+    // Rename back to the original collection so that the collection drops are predictable when
// dropping the database.
- [firstCollDrop, secondCollDrop].forEach(token => {
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: token._id}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
- cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
- });
-
- // Recreate the test collection.
- assert.writeOK(coll.insert({_id: "after recreate"}));
-
- // Test resuming from the 'dropDatabase' entry using 'resumeAfter'.
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {resumeAfter: resumeTokenDropDb}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
+ assert.writeOK(renameColl.renameCollection(coll.getName()));
+}
+
+// Explicitly drop one collection so that the order of notifications from the
+// dropDatabase command is deterministic.
+resumeCursor = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1});
+assertDropCollection(testDB, otherColl.getName());
+const firstCollDrop = cst.getOneChange(resumeCursor);
+assert.eq(firstCollDrop.operationType, "drop", tojson(firstCollDrop));
+assert.eq(firstCollDrop.ns, {db: testDB.getName(), coll: otherColl.getName()});
+
+// Dropping a database should generate a 'drop' notification for each collection, a
+// 'dropDatabase' notification, and finally an 'invalidate'.
+assert.commandWorked(testDB.dropDatabase());
+const dropDbChanges = cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
+const secondCollDrop = dropDbChanges[0];
+// For sharded passthrough suites, we know that the last entry will be a 'dropDatabase';
+// however, there may be multiple collection drops in 'dropDbChanges', depending on the
+// number of shards involved.
+const resumeTokenDropDb = dropDbChanges[dropDbChanges.length - 1]._id;
+const resumeTokenInvalidate =
cst.assertNextChangesEqual(
- {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
-
- // Test resuming from the 'invalidate' entry using 'resumeAfter'.
- assert.commandFailedWithCode(db.runCommand({
- aggregate: 1,
- pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
- cursor: {},
- collation: {locale: "simple"},
- }),
- ErrorCodes.InvalidResumeToken);
+ {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]})[0]
+ ._id;
- // Test resuming from the 'dropDatabase' entry using 'startAfter'.
+// Test resuming from the first collection drop and the second collection drop as a result of
+// dropping the database.
+[firstCollDrop, secondCollDrop].forEach(token => {
resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {startAfter: resumeTokenDropDb}}],
+ pipeline: [{$changeStream: {resumeAfter: token._id}}],
collection: 1,
aggregateOptions: {cursor: {batchSize: 0}},
});
+ cst.assertDatabaseDrop({cursor: resumeCursor, db: testDB});
cst.assertNextChangesEqual(
{cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
-
-    // Test resuming from the 'invalidate' entry using 'startAfter' and verify that it picks up
-    // the insert after recreating the db/collection.
- const expectedInsert = {
- operationType: "insert",
- ns: {db: testDB.getName(), coll: coll.getName()},
- fullDocument: {_id: "after recreate"},
- documentKey: {_id: "after recreate"}
- };
- resumeCursor = cst.startWatchingChanges({
- pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
- collection: 1,
- aggregateOptions: {cursor: {batchSize: 0}},
- });
- cst.consumeDropUpTo({
- cursor: resumeCursor,
- dropType: "dropDatabase",
- expectedNext: expectedInsert,
- });
-
- cst.cleanUp();
+});
+
+// Recreate the test collection.
+assert.writeOK(coll.insert({_id: "after recreate"}));
+
+// Test resuming from the 'dropDatabase' entry using 'resumeAfter'.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenDropDb}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+cst.assertNextChangesEqual(
+ {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
+
+// Test resuming from the 'invalidate' entry using 'resumeAfter'.
+assert.commandFailedWithCode(db.runCommand({
+ aggregate: 1,
+ pipeline: [{$changeStream: {resumeAfter: resumeTokenInvalidate}}],
+ cursor: {},
+ collation: {locale: "simple"},
+}),
+ ErrorCodes.InvalidResumeToken);
+
+// Test resuming from the 'dropDatabase' entry using 'startAfter'.
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {startAfter: resumeTokenDropDb}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+cst.assertNextChangesEqual(
+ {cursor: resumeCursor, expectedChanges: [{operationType: "invalidate"}]});
+
+// Test resuming from the 'invalidate' entry using 'startAfter' and verify that it picks up the
+// insert after recreating the db/collection.
+const expectedInsert = {
+ operationType: "insert",
+ ns: {db: testDB.getName(), coll: coll.getName()},
+ fullDocument: {_id: "after recreate"},
+ documentKey: {_id: "after recreate"}
+};
+resumeCursor = cst.startWatchingChanges({
+ pipeline: [{$changeStream: {startAfter: resumeTokenInvalidate}}],
+ collection: 1,
+ aggregateOptions: {cursor: {batchSize: 0}},
+});
+cst.consumeDropUpTo({
+ cursor: resumeCursor,
+ dropType: "dropDatabase",
+ expectedNext: expectedInsert,
+});
+
+cst.cleanUp();
})();
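
The final test pins down where the two resume modes diverge: an 'invalidate' token is rejected by 'resumeAfter' with InvalidResumeToken but accepted by 'startAfter', which lets a client continue past a dropDatabase. A minimal sketch, under the same assumptions as the sketches above (replica-set mongo shell; names are illustrative):

    const resDB = db.getSiblingDB("resume_example");
    assert.writeOK(resDB.c.insert({_id: 1}));
    const cursor = resDB.watch();
    assert.commandWorked(resDB.dropDatabase());
    // Consume 'drop' and 'dropDatabase' entries until the stream invalidates.
    let event;
    do {
        assert.soon(() => cursor.hasNext());
        event = cursor.next();
    } while (event.operationType !== "invalidate");
    // 'resumeAfter' refuses the invalidate token...
    assert.commandFailedWithCode(resDB.runCommand({
        aggregate: 1,
        pipeline: [{$changeStream: {resumeAfter: event._id}}],
        cursor: {}
    }),
                                 ErrorCodes.InvalidResumeToken);
    // ...while 'startAfter' accepts it and opens a stream past the invalidation.
    const restarted = resDB.watch([], {startAfter: event._id});
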