Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/change_streams.js                                      |   1 -
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js | 107 ++++++++
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js   |  79 ++++++
-rw-r--r--  jstests/sharding/lookup_change_stream_post_image_id_shard_key.js       |  94 ++++++++
4 files changed, 280 insertions(+), 1 deletion(-)
diff --git a/jstests/sharding/change_streams.js b/jstests/sharding/change_streams.js
index a2a1a83664a..3cb35d318bb 100644
--- a/jstests/sharding/change_streams.js
+++ b/jstests/sharding/change_streams.js
@@ -92,7 +92,6 @@
// Test that using change streams with any stages not allowed to run on mongos results in an
// error.
- assertErrorCode(mongosColl, [{$changeStream: {fullDocument: "updateLookup"}}], 40470);
assertErrorCode(
mongosColl, [{$changeStream: {}}, {$out: "shouldntWork"}], ErrorCodes.IllegalOperation);
diff --git a/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
new file mode 100644
index 00000000000..022d5dd6172
--- /dev/null
+++ b/jstests/sharding/lookup_change_stream_post_image_compound_shard_key.js
@@ -0,0 +1,107 @@
+// Tests the behavior of looking up the post image for change streams on collections which are
+// sharded with a compound shard key.
+(function() {
+ "use strict";
+
+ // For supportsMajorityReadConcern().
+ load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+ if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+ }
+
+ const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+ });
+
+ const mongosDB = st.s0.getDB(jsTestName());
+ const mongosColl = mongosDB[jsTestName()];
+
+ assert.commandWorked(mongosDB.dropDatabase());
+
+ // Enable sharding on the test DB and ensure its primary is shard 0.
+ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+ // Shard the test collection with a compound shard key: a, b, c. Then split it into two chunks,
+ // and put one chunk on each shard.
+ assert.commandWorked(mongosDB.adminCommand(
+ {shardCollection: mongosColl.getFullName(), key: {a: 1, b: 1, c: 1}}));
+
+ // Split the collection into 2 chunks:
+ // [{a: MinKey, b: MinKey, c: MinKey}, {a: 1, b: MinKey, c: MinKey})
+ // and
+ // [{a: 1, b: MinKey, c: MinKey}, {a: MaxKey, b: MaxKey, c: MaxKey}).
+ assert.commandWorked(mongosDB.adminCommand(
+ {split: mongosColl.getFullName(), middle: {a: 1, b: MinKey, c: MinKey}}));
+
+ // Move the upper chunk to shard 1.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: 1, b: MinKey, c: MinKey},
+ to: st.rs1.getURL()
+ }));
+
+ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+
+ const nDocs = 6;
+ const bValues = ["one", "two", "three", "four", "five", "six"];
+
+ // This shard key function results in 1/3rd of documents on shard0 and 2/3rds on shard1.
+ function shardKeyFromId(id) {
+ return {a: id % 3, b: bValues[id], c: id % 2};
+ }
+
+ // Do some writes.
+ for (let id = 0; id < nDocs; ++id) {
+ const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
+ assert.writeOK(mongosColl.insert(documentKey));
+ assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 1}}));
+ }
+
+ for (let id = 0; id < nDocs; ++id) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: id});
+
+ assert.soon(() => changeStream.hasNext());
+ next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+ assert.docEq(next.fullDocument,
+ Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 1}));
+ }
+
+ // Test that the change stream can still see the updated post image, even if a chunk is
+ // migrated.
+ for (let id = 0; id < nDocs; ++id) {
+ const documentKey = Object.merge({_id: id}, shardKeyFromId(id));
+ assert.writeOK(mongosColl.update(documentKey, {$set: {updatedCount: 2}}));
+ }
+
+ // Move the upper chunk back to shard 0.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ find: {a: 1, b: MinKey, c: MinKey},
+ to: st.rs0.getURL()
+ }));
+
+ for (let id = 0; id < nDocs; ++id) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, Object.merge(shardKeyFromId(id), {_id: id}));
+ assert.docEq(next.fullDocument,
+ Object.merge(shardKeyFromId(id), {_id: id, updatedCount: 2}));
+ }
+
+ st.stop();
+})();
diff --git a/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
new file mode 100644
index 00000000000..d2b130abf33
--- /dev/null
+++ b/jstests/sharding/lookup_change_stream_post_image_hashed_shard_key.js
@@ -0,0 +1,79 @@
+// Tests the behavior of looking up the post image for change streams on collections which are
+// sharded with a hashed shard key.
+(function() {
+ "use strict";
+
+ // For supportsMajorityReadConcern().
+ load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+ if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+ }
+
+ const st = new ShardingTest({
+ shards: 2,
+ enableBalancer: false,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+ });
+
+ const mongosDB = st.s0.getDB(jsTestName());
+ const mongosColl = mongosDB[jsTestName()];
+
+ assert.commandWorked(mongosDB.dropDatabase());
+
+ // Enable sharding on the test DB and ensure its primary is shard 0.
+ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+ // Shard the test collection on the field "shardKey", and split it into two chunks.
+ assert.commandWorked(mongosDB.adminCommand({
+ shardCollection: mongosColl.getFullName(),
+ numInitialChunks: 2,
+ key: {shardKey: "hashed"}
+ }));
+
+ // Make sure the negative chunk is on shard 0.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ bounds: [{shardKey: MinKey}, {shardKey: NumberLong("0")}],
+ to: st.rs0.getURL()
+ }));
+
+ // Make sure the positive chunk is on shard 1.
+ assert.commandWorked(mongosDB.adminCommand({
+ moveChunk: mongosColl.getFullName(),
+ bounds: [{shardKey: NumberLong("0")}, {shardKey: MaxKey}],
+ to: st.rs1.getURL()
+ }));
+
+ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+
+ // Write enough documents that we likely have some on each shard.
+ const nDocs = 1000;
+ for (let id = 0; id < nDocs; ++id) {
+ assert.writeOK(mongosColl.insert({_id: id, shardKey: id}));
+ assert.writeOK(mongosColl.update({shardKey: id}, {$set: {updatedCount: 1}}));
+ }
+
+ for (let id = 0; id < nDocs; ++id) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ // TODO SERVER-30599 this documentKey should contain the shard key.
+ assert.eq(next.documentKey, {_id: id});
+
+ assert.soon(() => changeStream.hasNext());
+ next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, {shardKey: id, _id: id});
+ assert.docEq(next.fullDocument, {_id: id, shardKey: id, updatedCount: 1});
+ }
+
+ st.stop();
+})();
diff --git a/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
new file mode 100644
index 00000000000..5cd2a6fa5e0
--- /dev/null
+++ b/jstests/sharding/lookup_change_stream_post_image_id_shard_key.js
@@ -0,0 +1,94 @@
+// Tests the behavior of looking up the post image for change streams on collections which are
+// sharded with a key which is just the "_id" field.
+(function() {
+ "use strict";
+
+ // For supportsMajorityReadConcern().
+ load("jstests/multiVersion/libs/causal_consistency_helpers.js");
+
+ if (!supportsMajorityReadConcern()) {
+ jsTestLog("Skipping test since storage engine doesn't support majority read concern.");
+ return;
+ }
+
+ const st = new ShardingTest({
+ shards: 2,
+ rs: {
+ nodes: 1,
+ enableMajorityReadConcern: '',
+ // Use a higher frequency for periodic noops to speed up the test.
+ setParameter: {writePeriodicNoops: true, periodicNoopIntervalSecs: 1}
+ }
+ });
+
+ const mongosDB = st.s0.getDB(jsTestName());
+ const mongosColl = mongosDB[jsTestName()];
+
+ assert.commandWorked(mongosDB.dropDatabase());
+
+ // Enable sharding on the test DB and ensure its primary is shard 0.
+ assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()}));
+ st.ensurePrimaryShard(mongosDB.getName(), st.rs0.getURL());
+
+ // Shard the test collection on _id.
+ assert.commandWorked(
+ mongosDB.adminCommand({shardCollection: mongosColl.getFullName(), key: {_id: 1}}));
+
+ // Split the collection into 2 chunks: [MinKey, 0), [0, MaxKey).
+ assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 0}}));
+
+ // Move the [0, MaxKey) chunk to shard 1.
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1}, to: st.rs1.getURL()}));
+
+ // Write a document to each chunk.
+ assert.writeOK(mongosColl.insert({_id: -1}));
+ assert.writeOK(mongosColl.insert({_id: 1}));
+
+ const changeStream = mongosColl.aggregate([{$changeStream: {fullDocument: "updateLookup"}}]);
+
+ // Do some writes.
+ assert.writeOK(mongosColl.insert({_id: 1000}));
+ assert.writeOK(mongosColl.insert({_id: -1000}));
+ assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 1}}));
+ assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 1}}));
+
+ for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "insert");
+ assert.eq(next.documentKey, {_id: nextId});
+ }
+
+ for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ // Only the "_id" field is present in next.documentKey because the shard key is the _id.
+ assert.eq(next.documentKey, {_id: nextId});
+ assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 1});
+ }
+
+ // Test that the change stream can still see the updated post image, even if a chunk is
+ // migrated.
+ assert.writeOK(mongosColl.update({_id: 1000}, {$set: {updatedCount: 2}}));
+ assert.writeOK(mongosColl.update({_id: -1000}, {$set: {updatedCount: 2}}));
+
+ // Split the [0, MaxKey) chunk into 2: [0, 500), [500, MaxKey).
+ assert.commandWorked(
+ mongosDB.adminCommand({split: mongosColl.getFullName(), middle: {_id: 500}}));
+ // Move the [500, MaxKey) chunk back to shard 0.
+ assert.commandWorked(mongosDB.adminCommand(
+ {moveChunk: mongosColl.getFullName(), find: {_id: 1000}, to: st.rs0.getURL()}));
+
+ for (let nextId of [1000, -1000]) {
+ assert.soon(() => changeStream.hasNext());
+ let next = changeStream.next();
+ assert.eq(next.operationType, "update");
+ assert.eq(next.documentKey, {_id: nextId});
+ assert.docEq(next.fullDocument, {_id: nextId, updatedCount: 2});
+ }
+
+ st.stop();
+})();