author    Arun Banala <arun.banala@mongodb.com>  2019-06-06 14:50:27 +0100
committer Arun Banala <arun.banala@mongodb.com>  2019-06-11 15:04:33 +0100
commit    52149b64892ba7a26f3cb1aee569baa0fae8450e (patch)
tree      343ab2c601f7424808cce894d2c8653e88c3ec5f /jstests/sharding/merge_with_chunk_migrations.js
parent    3dfc6949c246c833aebca65d4e9ac41bb2cc9fd9 (diff)
SERVER-40961 Extend translated $out tests to support all $merge modes
(cherry picked from commit 681fde4b157d1b534ccc08db6bc5e4bebd812951)
Diffstat (limited to 'jstests/sharding/merge_with_chunk_migrations.js')
-rw-r--r--  jstests/sharding/merge_with_chunk_migrations.js | 61
1 file changed, 40 insertions(+), 21 deletions(-)
diff --git a/jstests/sharding/merge_with_chunk_migrations.js b/jstests/sharding/merge_with_chunk_migrations.js
index a5667484870..2b9ba4256fa 100644
--- a/jstests/sharding/merge_with_chunk_migrations.js
+++ b/jstests/sharding/merge_with_chunk_migrations.js
@@ -3,6 +3,8 @@
(function() {
'use strict';
+ load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode.
+
const st = new ShardingTest({shards: 2, rs: {nodes: 1}});
const mongosDB = st.s.getDB(jsTestName());
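For context, withEachMergeMode is provided by the helper file loaded above. A minimal sketch of the iteration pattern it supplies, assuming it simply enumerates the documented whenMatched/whenNotMatched combinations (the real helper in jstests/aggregation/extras/merge_helpers.js is authoritative and may also cover pipeline-style whenMatched modes):

// Sketch only; the real implementation lives in jstests/aggregation/extras/merge_helpers.js.
function withEachMergeModeSketch(callback) {
    const whenMatchedModes = ["replace", "merge", "keepExisting", "fail"];
    const whenNotMatchedModes = ["insert", "discard", "fail"];
    for (const whenMatchedMode of whenMatchedModes) {
        for (const whenNotMatchedMode of whenNotMatchedModes) {
            callback({whenMatchedMode, whenNotMatchedMode});
        }
    }
}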
@@ -17,10 +19,26 @@
}
function runMergeWithMode(whenMatchedMode, whenNotMatchedMode, shardedColl) {
+ assert.commandWorked(targetColl.remove({}));
+
+ // For the 'whenNotMatched: fail/discard' modes, $merge will not insert the expected
+ // documents, causing the assertion below to fail. To avoid that, we pre-populate the
+ // target collection with documents that match those in the source collection.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
+ }
+
// Set the failpoint to hang in the first call to DocumentSourceCursor's getNext().
setAggHang("alwaysOn");
let comment = whenMatchedMode + "_" + whenNotMatchedMode + "_" + shardedColl.getName();
+
+ const mergeSpec = {
+ into: targetColl.getName(),
+ whenMatched: whenMatchedMode,
+ whenNotMatched: whenNotMatchedMode
+ };
// The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
// from being optimized away after it's been split. Otherwise, we won't hit the failpoint.
let outFn = `
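The pre-population above matters because of how $merge treats source documents that have no match in the target. A standalone illustration with hypothetical collections (not part of the test), assuming the default "on" field of _id:

// Illustration only: with whenNotMatched: "discard", unmatched source documents are
// silently dropped; with whenNotMatched: "fail", the aggregation errors instead.
const exampleSource = mongosDB.example_merge_source;  // hypothetical collection
const exampleTarget = mongosDB.example_merge_target;  // hypothetical collection
exampleSource.drop();
exampleTarget.drop();
assert.commandWorked(exampleSource.insert({_id: 0}));
assert.commandWorked(exampleSource.insert({_id: 1}));
assert.commandWorked(exampleTarget.insert({_id: 0}));
exampleSource.aggregate(
    [{$merge: {into: exampleTarget.getName(), whenMatched: "replace", whenNotMatched: "discard"}}]);
assert.eq(1, exampleTarget.find().itcount());  // {_id: 1} was discarded, never inserted.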
@@ -28,11 +46,7 @@
const sourceColl = sourceDB["${sourceColl.getName()}"];
sourceColl.aggregate([
{$_internalInhibitOptimization: {}},
- {$merge: {
- into: "${targetColl.getName()}",
- whenMatched: "${whenMatchedMode}",
- whenNotMatched: "${whenNotMatchedMode}"
- }}
+ {$merge: ${tojsononeline(mergeSpec)}}
],
{comment: "${comment}"});
`;
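tojsononeline() is the shell helper that serializes a value onto a single line, which is what lets the whole $merge spec be spliced into the template string as one literal. A quick illustration (output shown approximately):

// The spec object becomes one line of JSON, so the parallel-shell code sees a
// complete $merge stage rather than three separately quoted substitutions.
const exampleSpec = {into: "target_coll", whenMatched: "keepExisting", whenNotMatched: "discard"};
print(tojsononeline(exampleSpec));
// Prints roughly: { "into" : "target_coll", "whenMatched" : "keepExisting", "whenNotMatched" : "discard" }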
@@ -60,6 +74,15 @@
// Now both chunks are on shard0. Run a similar test except migrate the chunks back to
// shard1 in the middle of execution.
assert.commandWorked(targetColl.remove({}));
+
+ // For the 'whenNotMatched: fail/discard' modes, $merge will not insert the expected
+ // documents, causing the assertion below to fail. To avoid that, we pre-populate the
+ // target collection with documents that match those in the source collection.
+ if (whenNotMatchedMode == "fail" || whenNotMatchedMode == "discard") {
+ assert.commandWorked(targetColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(targetColl.insert({_id: 1, shardKey: 1}));
+ }
+
setAggHang("alwaysOn");
comment = comment + "_2";
// The $_internalInhibitOptimization stage is added to the pipeline to prevent the pipeline
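setAggHang() is defined earlier in the file, outside these hunks, and toggles a server failpoint on the shards. A hedged sketch of the usual configureFailPoint pattern; the failpoint name below is a placeholder, since the real name is not visible in this diff:

// Sketch only: "hangBeforeDocumentSourceCursorLoadBatch" is a placeholder name;
// the actual failpoint is whatever setAggHang() in the test file configures.
function setAggHangSketch(mode) {
    assert.commandWorked(st.shard0.adminCommand(
        {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
    assert.commandWorked(st.shard1.adminCommand(
        {configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode}));
}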
@@ -69,11 +92,7 @@
const sourceColl = sourceDB["${sourceColl.getName()}"];
sourceColl.aggregate([
{$_internalInhibitOptimization: {}},
- {$merge: {
- into: "${targetColl.getName()}",
- whenMatched: "${whenMatchedMode}",
- whenNotMatched: "${whenNotMatchedMode}"
- }}
+ {$merge: ${tojsononeline(mergeSpec)}}
],
{comment: "${comment}"});
`;
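The code that actually executes outFn is unchanged and therefore not part of this diff: the string runs in a parallel shell while the main thread migrates chunks. A simplified sketch of that surrounding flow, assuming the comment built above is used (e.g. via currentOp) to confirm the aggregation is hung before migrating:

// Simplified sketch of the unchanged surrounding logic: start the aggregation in a
// parallel shell, let it hang on the failpoint, migrate a chunk to the other shard,
// then release the failpoint and join the parallel shell so its assertions run.
const awaitMerge = startParallelShell(outFn, st.s.port);
assert.commandWorked(st.s.adminCommand(
    {moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard1.shardName}));
setAggHang("off");
awaitMerge();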
@@ -100,33 +119,33 @@
// Reset the chunk distribution.
assert.commandWorked(st.s.adminCommand(
{moveChunk: shardedColl.getFullName(), find: {shardKey: -1}, to: st.shard0.shardName}));
-
- assert.commandWorked(targetColl.remove({}));
}
// Shard the source collection with shard key {shardKey: 1} and split into 2 chunks.
st.shardColl(sourceColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
// Write a document to each chunk of the source collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+ assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
- runMergeWithMode("replace", "insert", sourceColl);
- runMergeWithMode("fail", "insert", sourceColl);
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, sourceColl);
+ });
// Run a similar test with chunk migrations on the output collection instead.
sourceColl.drop();
-
+ assert.commandWorked(targetColl.remove({}));
// Shard the output collection with shard key {shardKey: 1} and split into 2 chunks.
st.shardColl(targetColl.getName(), {shardKey: 1}, {shardKey: 0}, false, mongosDB.getName());
// Write two documents in the source collection that should target the two chunks in the target
// collection.
- assert.commandWorked(sourceColl.insert({shardKey: -1}));
- assert.commandWorked(sourceColl.insert({shardKey: 1}));
+ assert.commandWorked(sourceColl.insert({_id: 0, shardKey: -1}));
+ assert.commandWorked(sourceColl.insert({_id: 1, shardKey: 1}));
- runMergeWithMode("replace", "insert", targetColl);
- runMergeWithMode("fail", "insert", targetColl);
+ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => {
+ runMergeWithMode(whenMatchedMode, whenNotMatchedMode, targetColl);
+ });
st.stop();
})();