author    Mihai Andrei <mihai.andrei@10gen.com>    2020-06-26 14:51:56 -0400
committer Mihai Andrei <mihai.andrei@10gen.com>    2020-07-10 09:48:38 -0400
commit    951f018c71408248e41d6354c5becf9714075213 (patch)
tree      974b2692bf99d1a37f6b1b160b631734f7fc3331
parent    946e1ca19f65925bfeb1551dad00b2d36246fcfe (diff)
download  mongo-951f018c71408248e41d6354c5becf9714075213.tar.gz
SERVER-49150 Make merge_causes_infinite_loop.js more robust
(cherry picked from commit 9c61abb63b5acdc5f2e99c1185bb3be3d7342ac9)
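For context, this test exercises the Halloween problem: a pipeline that $merge-s back into the collection it reads from can revisit and re-update documents it has already processed. The pipeline body itself is elided from the hunks below, so the following is only a sketch of the pattern being run; the exact stages are an assumption inferred from the values the new assertions check (one pass doubles 'a'):

    // Hedged sketch -- not the literal pipeline from the test file. Doubling 'a'
    // and merging into 'outColl' matches the new assertion that, after one pass
    // into a different target collection, a == _id * 2 * largeNum.
    function pipeline(outColl) {
        return [
            {$addFields: {a: {$multiply: ["$a", 2]}}},
            {$merge: {into: outColl, whenMatched: "replace", whenNotMatched: "insert"}}
        ];
    }

Because the update increases 'a', each merged write moves the document forward in the {a: 1} index; when the target is the source collection itself, the scan can keep encountering documents it has already rewritten.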
-rw-r--r--    jstests/noPassthrough/merge_causes_infinite_loop.js    38
1 file changed, 28 insertions(+), 10 deletions(-)
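The excerpt below is truncated before the self-targeting assertion, so as a hedged sketch only: tests of this shape typically bound the otherwise-infinite self-merge with maxTimeMS (the removed lines later in this diff use maxTimeMS: 2500), turning the endless re-update into a fast, checkable failure rather than a hang:

    // Hedged sketch: bound the self-merge with maxTimeMS so the Halloween-problem
    // loop surfaces as a MaxTimeMSExpired error instead of hanging the test.
    assert.commandFailedWithCode(
        db.runCommand(
            {aggregate: coll.getName(), pipeline: sameCollPipeline, cursor: {}, maxTimeMS: 2500}),
        ErrorCodes.MaxTimeMSExpired);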
diff --git a/jstests/noPassthrough/merge_causes_infinite_loop.js b/jstests/noPassthrough/merge_causes_infinite_loop.js
index 166bb2e1b63..8b96d433d3b 100644
--- a/jstests/noPassthrough/merge_causes_infinite_loop.js
+++ b/jstests/noPassthrough/merge_causes_infinite_loop.js
@@ -23,20 +23,28 @@ const largeNum = 1000 * 1000 * 1000;
// possible.
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldPeriodMS: 1}));
-// Insert documents.
+// Insert documents into both collections. We populate the output collection to verify that
+// updates behave as expected when the source collection isn't the same as the target collection.
+//
// Note that the largeArray field is included to force documents to be written to disk and not
// simply be updated in the cache. This is crucial to exposing the halloween problem as the
-// physical location of each document needs to change for each document to visited and updated
+// physical location of each document needs to change for each document to be visited and updated
// multiple times.
-var bulk = coll.initializeUnorderedBulkOp();
-for (let i = 0; i < nDocs; i++) {
- bulk.insert({_id: i, a: i * largeNum, largeArray: (new Array(1024 * 1024).join("a"))});
+function insertDocuments(collObject) {
+ const bulk = collObject.initializeUnorderedBulkOp();
+ for (let i = 0; i < nDocs; i++) {
+ bulk.insert({_id: i, a: i * largeNum, largeArray: (new Array(1024 * 1024).join("a"))});
+ }
+ assert.commandWorked(bulk.execute());
}
-assert.commandWorked(bulk.execute());
+
+insertDocuments(coll);
+insertDocuments(out);
// Build an index over a, the field to be updated, so that updates will push modified documents
// forward in the index when outputting to the collection being aggregated.
assert.commandWorked(coll.createIndex({a: 1}));
+assert.commandWorked(out.createIndex({a: 1}));
// Returns a pipeline which outputs to the specified collection.
function pipeline(outColl) {
@@ -49,10 +57,20 @@ function pipeline(outColl) {
const differentCollPipeline = pipeline(out.getName());
const sameCollPipeline = pipeline(coll.getName());
-// Outputting the result of this pipeline to a different collection will not time out nor will any
-// of the computed values overflow.
-assert.commandWorked(db.runCommand(
- {aggregate: coll.getName(), pipeline: differentCollPipeline, cursor: {}, maxTimeMS: 2500}));
+// Targeting a collection that is not the collection being aggregated over will result in each
+// document's value of 'a' being updated exactly once.
+assert.commandWorked(
+ db.runCommand({aggregate: coll.getName(), pipeline: differentCollPipeline, cursor: {}}));
+
+// Filter out 'largeArray' as we are only interested in verifying the value of 'a' in each
+// document.
+const result = out.find({}, {largeArray: 0}).toArray();
+
+for (const doc of result) {
+ assert(doc.hasOwnProperty("a"), doc);
+ const expectedVal = doc["_id"] * 2 * largeNum;
+ assert.eq(doc["a"], expectedVal, doc);
+}
// Because this pipeline writes to the collection being aggregated, it will cause documents to be
// updated and pushed forward indefinitely. This will cause the computed values to eventually