author     Paolo Polato <paolo.polato@mongodb.com>    2022-11-25 14:41:09 +0000
committer  Evergreen Agent <no-reply@evergreen.mongodb.com>    2022-11-25 15:11:33 +0000
commit     8a01d8d90871e627134a553611d3f6b2caacf9c4 (patch)
tree       6bb508655bb64c8bac10411bfc0d6e3835cda0d5
parent     fe588e79d92b90eaf3a057e8fca78c0b8b8169fd (diff)
SERVER-71085 Generate a single oplog entry from the primary shard when _shardsvrCreateCollection() is invoked
 jstests/change_streams/metadata_notifications.js                                          |  13
 jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js |  14
 jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js    | 334
 jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js  |  42
 jstests/sharding/change_stream_metadata_notifications.js                                  |   1
 src/mongo/db/s/drop_collection_coordinator.cpp                                            |   5
 src/mongo/db/s/sharding_ddl_util.cpp                                                      |  26
 7 files changed, 223 insertions(+), 212 deletions(-)
diff --git a/jstests/change_streams/metadata_notifications.js b/jstests/change_streams/metadata_notifications.js
index d936340c943..d25f5bd3aee 100644
--- a/jstests/change_streams/metadata_notifications.js
+++ b/jstests/change_streams/metadata_notifications.js
@@ -1,7 +1,10 @@
// Tests of $changeStream notifications for metadata operations.
// Do not run in whole-cluster passthrough since this test assumes that the change stream will be
// invalidated by a database drop.
-// @tags: [do_not_run_in_whole_cluster_passthrough]
+// @tags: [
+// do_not_run_in_whole_cluster_passthrough,
+// requires_fcv_63,
+// ]
(function() {
"use strict";
@@ -78,15 +81,9 @@ const resumeToken = changes[0]._id;
const resumeTokenDrop = changes[3]._id;
const resumeTokenInvalidate = changes[4]._id;
-// Verify we can startAfter the invalidate. We should see one drop event for every other shard
-// that the collection was present on, or nothing if the collection was not sharded. This test
+// Verify we can startAfter the invalidate, but no new events may be retrieved. This test
// exercises the bug described in SERVER-41196.
const restartedStream = coll.watch([], {startAfter: resumeTokenInvalidate});
-for (let i = 0; i < numShards - 1; ++i) {
- assert.soon(() => restartedStream.hasNext());
- const nextEvent = restartedStream.next();
- assert.eq(nextEvent.operationType, "drop", () => tojson(nextEvent));
-}
assert(!restartedStream.hasNext(), () => tojson(restartedStream.next()));
// Verify that we can resume a stream after a collection drop without an explicit collation.
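For orientation, a minimal sketch of the semantics the updated hunk relies on (names follow the test; behaviour claims are hedged): an invalidate token can seed a new stream via startAfter, whereas resumeAfter does not accept invalidate tokens, and with the single-drop oplog semantics of SERVER-71085 no per-shard duplicate 'drop' events remain to be delivered.

    // startAfter accepts an invalidate token and opens a fresh stream from it.
    const restartedStream = coll.watch([], {startAfter: resumeTokenInvalidate});
    // With a single 'drop' oplog entry cluster-wide, nothing further is pending.
    assert(!restartedStream.hasNext(), () => tojson(restartedStream.next()));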
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js
index 371ac35b4e4..97a42780f54 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js
@@ -3,7 +3,7 @@
// part of the oplog cursor's filter in order to filter out results as early as possible.
// @tags: [
// featureFlagChangeStreamsRewrite,
-// requires_fcv_51,
+// requires_fcv_63,
// requires_pipeline_optimization,
// requires_sharding,
// uses_change_streams,
@@ -210,17 +210,17 @@ const runVerifyOpsTestcases = (op) => {
// Test that {$eq: null} on the 'fullDocument' field matches the 'drop' event.
verifyOps(resumeAfterToken,
{$match: {operationType: op, fullDocument: {$eq: null}}},
- [[op], [op]],
- [1, 1] /* expectedChangeStreamDocsReturned */,
- [1, 1] /* expectedOplogCursorReturnedDocs */,
+ [[op]],
+ [1, 0] /* expectedChangeStreamDocsReturned */,
+ [1, 0] /* expectedOplogCursorReturnedDocs */,
true /* runOnWholeDB */);
// Test that {$exists: false} on the 'fullDocument' field matches the 'drop' event.
verifyOps(resumeAfterToken,
{$match: {operationType: op, fullDocument: {$exists: false}}},
- [[op], [op]],
- [1, 1] /* expectedChangeStreamDocsReturned */,
- [1, 1] /* expectedOplogCursorReturnedDocs */,
+ [[op]],
+ [1, 0] /* expectedChangeStreamDocsReturned */,
+ [1, 0] /* expectedOplogCursorReturnedDocs */,
true /* runOnWholeDB */);
// Test that {$exists: true} on the 'fullDocument' field does not match the 'drop' event.
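A hedged reading of the updated expectation arrays (the helper and argument names follow the test file; "drop" stands in for the loop variable op): after SERVER-71085 only one shard records the collection drop, so the per-shard counts become asymmetric.

    // Before this change both shards returned one matching oplog doc each: [1, 1].
    // Now the 'drop' entry exists on a single shard only, hence [1, 0].
    verifyOps(resumeAfterToken,
              {$match: {operationType: "drop", fullDocument: {$eq: null}}},
              [["drop"]],
              [1, 0] /* expectedChangeStreamDocsReturned */,
              [1, 0] /* expectedOplogCursorReturnedDocs */,
              true /* runOnWholeDB */);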
diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js
index 526eff8b252..fed3b3268f5 100644
--- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js
+++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js
@@ -2,7 +2,7 @@
// and apply it to oplog-format documents in order to filter out results as early as possible.
// @tags: [
// featureFlagChangeStreamsRewrite,
-// requires_fcv_51,
+// requires_fcv_63,
// requires_pipeline_optimization,
// requires_sharding,
// uses_change_streams,
@@ -47,8 +47,9 @@ function verifyOnWholeCluster(
changeStreamSpec: {resumeAfter: resumeAfterToken},
userMatchExpr: userMatchExpr,
expectedResult: expectedResult,
- expectedOplogNReturnedPerShard:
- [expectedOplogRetDocsForEachShard, expectedOplogRetDocsForEachShard]
+ expectedOplogNReturnedPerShard: Array.isArray(expectedOplogRetDocsForEachShard)
+ ? expectedOplogRetDocsForEachShard
+ : [expectedOplogRetDocsForEachShard, expectedOplogRetDocsForEachShard]
});
}
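A brief sketch of the two calling conventions the Array.isArray() branch above enables (calls mirror ones appearing later in the file; the concrete counts are illustrative):

    // Scalar form: the same oplog-doc count is expected on both shards.
    verifyOnWholeCluster(resumeAfterToken,
                         {$match: {ns: {db: dbName, coll: "coll1"}}},
                         {coll1: {insert: [1, 2]}},
                         2 /* expectedOplogRetDocsForEachShard */);
    // Array form: per-shard counts, needed now that only one shard logs the 'drop'.
    verifyOnWholeCluster(resumeAfterToken,
                         {$match: {ns: {db: dbName, coll: "coll.coll3"}}},
                         {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
                         [2, 1] /* expectedOplogRetDocsForEachShard */);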
@@ -135,12 +136,12 @@ verifyOnWholeCluster(resumeAfterToken,
2 /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {ns: {db: dbName, coll: "coll.coll3"}}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$eq: ["$ns", {db: dbName, coll: "coll.coll3"}]}}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
// Ensure that the '$match' on the namespace with only the db component should not emit any document and
// the oplog should not return any documents.
@@ -200,19 +201,19 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$eq: ["$ns.db", dbName]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// These cases ensure that the '$match' on a regex of the namespace's db should return documents for
// all collections and the oplog should return all documents for each shard.
@@ -221,29 +222,29 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.db", regex: "^change_stream_match_pushdown.*$"}}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": /^(change_stream_match_pushdown.*$)/}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{
@@ -253,19 +254,19 @@ verifyOnWholeCluster(
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": /^(Change_Stream_MATCH_PUSHDOWN.*$)/i}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{
@@ -279,19 +280,19 @@ verifyOnWholeCluster(
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": /(^unknown$|^change_stream_match_pushdown.*$)/}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{
@@ -305,19 +306,19 @@ verifyOnWholeCluster(
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": /^unknown$|^change_stream_match_pushdown.*$/}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{
@@ -330,10 +331,10 @@ verifyOnWholeCluster(
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// Ensure that the '$match' on non-existing db should not return any document and oplog should not
// return any document for each shard.
@@ -384,12 +385,12 @@ verifyOnWholeCluster(resumeAfterToken,
2 /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": "coll.coll3"}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$eq: ["$ns.coll", "coll.coll3"]}}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
// This group of tests ensures that the '$match' on the regex of the collection field path should
// emit only the required documents and oplog should return only required document(s) for each
@@ -412,12 +413,12 @@ verifyOnWholeCluster(resumeAfterToken,
2 /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": /^col.*3/}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.coll", regex: "^col.*3"}}}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
// This group of tests ensures that the '$match' on the regex matching all collections should return
// documents from all collections and the oplog should return all documents for each shard.
@@ -426,38 +427,38 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.coll", regex: "^col.*"}}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": /^CoLL.*/i}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.coll", regex: "^CoLL.*", options: "i"}}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// Ensure that the '$match' on the regex matching 3 collections should return documents from these
// collections and oplog should return required documents for each shard.
@@ -466,29 +467,29 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}
},
- 5 /* expectedOplogRetDocsForEachShard */);
+ [5, 4] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.coll", regex: "^col.*1$|^col.*2$|^col.*3$"}}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}
},
- 5 /* expectedOplogRetDocsForEachShard */);
+ [5, 4] /* expectedOplogRetDocsForEachShard */);
// Ensure that the '$match' on the regex to exclude 'coll1', 'coll2' and 'coll4' should return only
// documents from 'coll.coll3' and oplog should return required documents for each shard.
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": /^coll[^124]/}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.coll", regex: "^coll[^124]"}}}},
- {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]}},
- 2 /* expectedOplogRetDocsForEachShard */);
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}},
+ [2, 1] /* expectedOplogRetDocsForEachShard */);
// Ensure that the '$match' on non-existing collection should not return any document and oplog
// should not return any document for each shard.
@@ -528,19 +529,19 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$in: ["$ns.db", [dbName]]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// This group of tests ensures that '$in' and equivalent '$expr' expression on regex matching the db
// name should return all documents and oplog should return all documents for each shard.
@@ -549,29 +550,29 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{$match: {$expr: {$or: [{$regexMatch: {input: "$ns.db", regex: "^change_stream_match.*$"}}]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": {$in: [/^change_stream_MATCH.*$/i]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{
@@ -586,10 +587,10 @@ verifyOnWholeCluster(
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// Ensure that an empty '$in' on db path should not match any collection and oplog should not return
// any document for each shard.
@@ -607,10 +608,10 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$expr' with a mix of valid and invalid db names should return required documents at the
// oplog for each shard.
@@ -619,10 +620,10 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$in' on db path with mix of string and regex can be rewritten and oplog should
// return '0' document for each shard.
@@ -696,10 +697,10 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{
$match: {
@@ -713,19 +714,19 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": {$in: [/^COLL.*$/i]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{
@@ -740,10 +741,10 @@ verifyOnWholeCluster(
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// Ensure that an empty '$in' should not match any collection and oplog should not return any
// document for each shard.
@@ -757,7 +758,7 @@ verifyOnWholeCluster(resumeAfterToken,
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": {$in: ["coll1", 1]}}},
{coll1: {insert: [1, 2]}},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$expr' on '$in' with mix of valid and invalid collections should return only
// required documents at oplog for each shard.
@@ -772,9 +773,9 @@ verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": {$in: ["coll1", /^coll.*3$/]}}},
{
coll1: {insert: [1, 2]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
},
- 3 /* expectedOplogRetDocsForEachShard */);
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{
$match: {
@@ -788,9 +789,9 @@ verifyOnWholeCluster(resumeAfterToken,
},
{
coll1: {insert: [1, 2]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
},
- 3 /* expectedOplogRetDocsForEachShard */);
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$in' and equivalent '$expr' expression with mix of string and regex can be rewritten
// and oplog should return '0' document for each shard.
@@ -819,52 +820,52 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": {$nin: ["unknown"]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$not: {$or: [{$eq: ["$ns.db", "unknown"]}]}}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.db": {$nin: [/^unknown$/]}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(
resumeAfterToken,
{$match: {$expr: {$not: {$or: [{$regexMatch: {input: "$ns.db", regex: "^unknown$"}}]}}}},
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
// This group of tests ensures that '$nin' and the equivalent '$expr' expression matching the db name
// should only return documents from unmonitored db and oplog should return only required documents
@@ -900,49 +901,41 @@ verifyOnWholeCluster(
// Ensure that '$nin' and equivalent '$expr' expression on multiple collections should return the
// required documents and oplog should return required documents for each shard.
-verifyOnWholeCluster(resumeAfterToken,
- {$match: {"ns.coll": {$nin: ["coll1", "coll2", "coll4"]}}},
- {
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
- "other_coll": {insert: [1, 2]}
- },
- 3 /* expectedOplogRetDocsForEachShard */);
-verifyOnWholeCluster(resumeAfterToken,
- {$match: {$expr: {$not: {$in: ["$ns.coll", ["coll1", "coll2", "coll4"]]}}}},
- {
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
- "other_coll": {insert: [1, 2]}
- },
- 3 /* expectedOplogRetDocsForEachShard */);
+verifyOnWholeCluster(
+ resumeAfterToken,
+ {$match: {"ns.coll": {$nin: ["coll1", "coll2", "coll4"]}}},
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}, "other_coll": {insert: [1, 2]}},
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
+verifyOnWholeCluster(
+ resumeAfterToken,
+ {$match: {$expr: {$not: {$in: ["$ns.coll", ["coll1", "coll2", "coll4"]]}}}},
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}, "other_coll": {insert: [1, 2]}},
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$nin' and equivalent '$expr' expression on regex of multiple collections should
// return the required documents and oplog should return required documents for each shard.
-verifyOnWholeCluster(resumeAfterToken,
- {$match: {"ns.coll": {$nin: [/^coll1$/, /^coll2$/, /^coll4$/]}}},
- {
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
- "other_coll": {insert: [1, 2]}
- },
- 3 /* expectedOplogRetDocsForEachShard */);
-verifyOnWholeCluster(resumeAfterToken,
- {
- $match: {
- $expr: {
- $not: {
- $or: [
- {$regexMatch: {input: "$ns.coll", regex: "^coll1$"}},
- {$regexMatch: {input: "$ns.coll", regex: "^coll2$"}},
- {$regexMatch: {input: "$ns.coll", regex: "^coll4$"}}
- ]
- }
- }
- }
- },
- {
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
- "other_coll": {insert: [1, 2]}
- },
- 3 /* expectedOplogRetDocsForEachShard */);
+verifyOnWholeCluster(
+ resumeAfterToken,
+ {$match: {"ns.coll": {$nin: [/^coll1$/, /^coll2$/, /^coll4$/]}}},
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}, "other_coll": {insert: [1, 2]}},
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
+verifyOnWholeCluster(
+ resumeAfterToken,
+ {
+ $match: {
+ $expr: {
+ $not: {
+ $or: [
+ {$regexMatch: {input: "$ns.coll", regex: "^coll1$"}},
+ {$regexMatch: {input: "$ns.coll", regex: "^coll2$"}},
+ {$regexMatch: {input: "$ns.coll", regex: "^coll4$"}}
+ ]
+ }
+ }
+ }
+ },
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}, "other_coll": {insert: [1, 2]}},
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$nin' and equivalent '$expr' expression on a regex matching all collections should
// return documents from unmonitored db and oplog should also return documents for unmonitored db
@@ -974,11 +967,11 @@ verifyOnWholeCluster(resumeAfterToken,
{
coll1: {insert: [1, 2]},
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
"coll4": {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$nin' with invalid collection cannot be rewritten and oplog should return all
// documents for each shard.
@@ -986,11 +979,11 @@ verifyOnWholeCluster(resumeAfterToken,
{$match: {"ns.coll": {$nin: ["coll1", 1]}}},
{
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
coll4: {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 9 /* expectedOplogRetDocsForEachShard */);
+ [9, 8] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$expr' with mix of valid and invalid collection should return required documents at
// the oplog for each shard.
@@ -998,39 +991,35 @@ verifyOnWholeCluster(resumeAfterToken,
{$match: {$expr: {$not: {$in: ["$ns.coll", ["coll1", 1]]}}}},
{
coll2: {insert: [3, 4], rename: ["newColl2", "newColl2"]},
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
+ "coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]},
coll4: {insert: [7, 8, 9, 10, 11, 12]},
"other_coll": {insert: [1, 2]}
},
- 8 /* expectedOplogRetDocsForEachShard */);
+ [8, 7] /* expectedOplogRetDocsForEachShard */);
// Ensure that '$nin' and equivalent '$expr' expression with mix of string and regex can be
// rewritten and oplog should return required documents for each shard.
-verifyOnWholeCluster(resumeAfterToken,
- {$match: {"ns.coll": {$nin: ["coll1", /^coll2$/, "coll4"]}}},
- {
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
- "other_coll": {insert: [1, 2]}
- },
- 3 /* expectedOplogRetDocsForEachShard */);
-verifyOnWholeCluster(resumeAfterToken,
- {
- $match: {
- $expr: {
- $not: {
- $or: [
- {$in: ["$ns.coll", ["coll1", "coll4"]]},
- {$regexMatch: {input: "$ns.coll", regex: "^coll2$"}},
- ]
- }
- }
- }
- },
- {
- "coll.coll3": {insert: [5, 6], drop: ["coll.coll3", "coll.coll3"]},
- "other_coll": {insert: [1, 2]}
- },
- 3 /* expectedOplogRetDocsForEachShard */);
+verifyOnWholeCluster(
+ resumeAfterToken,
+ {$match: {"ns.coll": {$nin: ["coll1", /^coll2$/, "coll4"]}}},
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}, "other_coll": {insert: [1, 2]}},
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
+verifyOnWholeCluster(
+ resumeAfterToken,
+ {
+ $match: {
+ $expr: {
+ $not: {
+ $or: [
+ {$in: ["$ns.coll", ["coll1", "coll4"]]},
+ {$regexMatch: {input: "$ns.coll", regex: "^coll2$"}},
+ ]
+ }
+ }
+ }
+ },
+ {"coll.coll3": {insert: [5, 6], drop: ["coll.coll3"]}, "other_coll": {insert: [1, 2]}},
+ [3, 2] /* expectedOplogRetDocsForEachShard */);
// At this stage, the coll2 has been renamed to 'newColl2' and coll3 has been dropped. The test from
// here will drop the database and ensure that the 'ns' filter when applied over the collection
@@ -1046,30 +1035,33 @@ const secondResumeAfterToken =
assert.commandWorked(db.dropDatabase());
// This group of tests ensures that the match on 'coll1' only sees the 'drop' events.
+// TODO SERVER-71364 for each call to verifyOnWholeCluster() below:
+// - convert {coll1: {drop: ["coll1", "coll1"]}} object into {coll1: {drop: ["coll1"]}}
+// - set every expectedOplogRetDocsForEachShard below to [1, 0]
verifyOnWholeCluster(secondResumeAfterToken,
{$match: {ns: {db: dbName, coll: "coll1"}}},
{coll1: {drop: ["coll1", "coll1"]}},
- 1 /* expectedOplogRetDocsForEachShard */);
+ [1, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(secondResumeAfterToken,
{$match: {$expr: {$eq: ["$ns", {db: dbName, coll: "coll1"}]}}},
{coll1: {drop: ["coll1", "coll1"]}},
- 1 /* expectedOplogRetDocsForEachShard */);
+ [1, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(secondResumeAfterToken,
{$match: {"ns.coll": "coll1"}},
{coll1: {drop: ["coll1", "coll1"]}},
- 1 /* expectedOplogRetDocsForEachShard */);
+ [1, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(secondResumeAfterToken,
{$match: {$expr: {$eq: ["$ns.coll", "coll1"]}}},
{coll1: {drop: ["coll1", "coll1"]}},
- 1 /* expectedOplogRetDocsForEachShard */);
+ [1, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(secondResumeAfterToken,
{$match: {"ns.coll": /^col.*1/}},
{coll1: {drop: ["coll1", "coll1"]}},
- 1 /* expectedOplogRetDocsForEachShard */);
+ [1, 1] /* expectedOplogRetDocsForEachShard */);
verifyOnWholeCluster(secondResumeAfterToken,
{$match: {$expr: {$regexMatch: {input: "$ns.coll", regex: "^col.*1"}}}},
{coll1: {drop: ["coll1", "coll1"]}},
- 1 /* expectedOplogRetDocsForEachShard */);
+ [1, 1] /* expectedOplogRetDocsForEachShard */);
// Ensure that the '$ns' object containing only 'db' should see only the 'dropDatabase' event and
// only the required documents get returned at the oplog for each shard.
diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
index f9035b2a268..5778f947427 100644
--- a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
+++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js
@@ -248,19 +248,9 @@ var $config = extendWorkload($config, function($config, $super) {
}
}
- let previousWritePeriodicNoops;
-
$config.setup = function setup(db, collName, cluster) {
$super.setup.apply(this, arguments);
- // Set 'writePeriodicNoops' to ensure liveness on change stream events in the case where one
- // of the shards has no changes to report.
- cluster.executeOnMongodNodes((db) => {
- const res = db.adminCommand({setParameter: 1, writePeriodicNoops: true});
- assert.commandWorked(res);
- previousWritePeriodicNoops = res.was;
- });
-
// Set the 'x' field to mirror the '_id' and 'skey' fields. 'x' will be used as query for
// {multi: true} writes.
db[collName].find({}).forEach(doc => {
@@ -280,19 +270,39 @@ var $config = extendWorkload($config, function($config, $super) {
};
$config.teardown = function teardown(db, collName, cluster) {
- cluster.executeOnMongodNodes((db) => {
- assert.commandWorked(
- db.adminCommand({setParameter: 1, writePeriodicNoops: previousWritePeriodicNoops}));
- });
-
// Drop the collection so as to have a sentinel event (drop) on the change stream.
assertAlways(db[collName].drop());
- // Validate the change stream events.
+ // Validate the change stream events after setting 'writePeriodicNoops' on all the nodes of
+ // the cluster to ensure liveness in case there are nodes with no events to report.
+ let previousWritePeriodicNoopsOnShards;
+ let previousWritePeriodicNoopsOnConfigServer;
+
+ cluster.executeOnMongodNodes((db) => {
+ const res = db.adminCommand({setParameter: 1, writePeriodicNoops: true});
+ assert.commandWorked(res);
+ previousWritePeriodicNoopsOnShards = res.was;
+ });
+ cluster.executeOnConfigNodes((db) => {
+ const res = db.adminCommand({setParameter: 1, writePeriodicNoops: true});
+ assert.commandWorked(res);
+ previousWritePeriodicNoopsOnConfigServer = res.was;
+ });
+
var startAtOperationTime =
Timestamp(this.startAtOperationTime.t, this.startAtOperationTime.i);
checkChangeStream(db, collName, startAtOperationTime);
+ // Restore the original configuration.
+ cluster.executeOnMongodNodes((db) => {
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, writePeriodicNoops: previousWritePeriodicNoopsOnShards}));
+ });
+ cluster.executeOnConfigNodes((db) => {
+ assert.commandWorked(db.adminCommand(
+ {setParameter: 1, writePeriodicNoops: previousWritePeriodicNoopsOnConfigServer}));
+ });
+
$super.teardown.apply(this, arguments);
};
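The save-and-restore pattern used in the teardown above relies on setParameter reporting the previous value in its 'was' field; a minimal standalone sketch, assuming a direct mongod connection 'db':

    const res = db.adminCommand({setParameter: 1, writePeriodicNoops: true});
    assert.commandWorked(res);
    const previousValue = res.was;  // prior setting, as reported by the server
    // ... validate the change stream events while noop writes guarantee liveness ...
    assert.commandWorked(
        db.adminCommand({setParameter: 1, writePeriodicNoops: previousValue}));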
diff --git a/jstests/sharding/change_stream_metadata_notifications.js b/jstests/sharding/change_stream_metadata_notifications.js
index 6d4c386846e..3f66edafe9e 100644
--- a/jstests/sharding/change_stream_metadata_notifications.js
+++ b/jstests/sharding/change_stream_metadata_notifications.js
@@ -13,6 +13,7 @@ const st = new ShardingTest({
rs: {
nodes: 1,
enableMajorityReadConcern: '',
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
}
});
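A note on the added parameters: writePeriodicNoops makes each mongod append periodic noop entries to its oplog, and periodicNoopIntervalSecs: 1 does so every second, so a cluster-wide change stream keeps advancing even on a shard with no real writes. A sketch of the equivalent standalone setup (shard count assumed):

    const st = new ShardingTest({
        shards: 2,
        rs: {
            nodes: 1,
            setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
        }
    });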
diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp
index bd4c4c5d6a1..bccdfc0a3da 100644
--- a/src/mongo/db/s/drop_collection_coordinator.cpp
+++ b/src/mongo/db/s/drop_collection_coordinator.cpp
@@ -226,7 +226,7 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
participants,
**executor,
getCurrentSession(),
- false /*fromMigrate*/);
+ true /*fromMigrate*/);
// The sharded collection must be dropped on the primary shard after it has been
// dropped on all of the other shards to ensure it can only be re-created as
@@ -239,6 +239,9 @@ ExecutorFuture<void> DropCollectionCoordinator::_runImpl(
getCurrentSession(),
false /*fromMigrate*/);
+ // Remove the potential query analyzer document only after purging the collection
+ // from the catalog. This ensures there are no leftover documents referencing an
+ // old incarnation of the collection.
sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss(), boost::none);
ShardingLogging::get(opCtx)->logChange(opCtx, "dropCollection", nss().ns());
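Taken together, the two hunks above mean participant shards drop the collection with fromMigrate=true (filtered out by change streams) while the primary shard performs the only visible drop. A hedged shell-level sketch of the observable outcome (database and collection names assumed):

    const stream = db.getSiblingDB("test").coll.watch();
    assert(db.getSiblingDB("test").coll.drop());
    assert.soon(() => stream.hasNext());
    const event = stream.next();
    // Exactly one 'drop' is expected, regardless of how many shards owned chunks;
    // participant-shard drops are now marked fromMigrate and hidden from streams.
    assert.eq(event.operationType, "drop", () => tojson(event));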
diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp
index 2b9756eb445..383264d9922 100644
--- a/src/mongo/db/s/sharding_ddl_util.cpp
+++ b/src/mongo/db/s/sharding_ddl_util.cpp
@@ -654,17 +654,25 @@ void ensureCollectionDroppedNoChangeEvent(OperationContext* opCtx,
"mongo::sharding_ddl_util::ensureCollectionDroppedNoChangeEvent",
nss.toString(),
[&] {
- AutoGetCollection coll(opCtx, nss, MODE_X);
- if (!coll || (uuid && coll->uuid() != uuid)) {
- // If the collection doesn't exist or exists with a different UUID,
- // then the requested collection has been dropped already.
+ try {
+ AutoGetCollection coll(opCtx, nss, MODE_X);
+ if (!coll || (uuid && coll->uuid() != uuid)) {
+ // If the collection doesn't exist or exists with a different
+ // UUID, then the requested collection has been dropped already.
+ return;
+ }
+
+ WriteUnitOfWork wuow(opCtx);
+ uassertStatusOK(coll.getDb()->dropCollectionEvenIfSystem(
+ opCtx, nss, {} /* dropOpTime */, true /* markFromMigrate */));
+ wuow.commit();
+ } catch (const ExceptionFor<ErrorCodes::InvalidViewDefinition>&) {
+ // AutoGetCollection may raise this exception when the collection
+ // doesn't exist and the CollectionCatalog starts looking into the
+ // list of existing views; the error can be ignored, and the
+ // collection may be considered as already dropped.
return;
}
-
- WriteUnitOfWork wuow(opCtx);
- uassertStatusOK(coll.getDb()->dropCollectionEvenIfSystem(
- opCtx, nss, {} /* dropOpTime */, true /* markFromMigrate */));
- wuow.commit();
});
}
} // namespace sharding_ddl_util