summaryrefslogtreecommitdiff
path: root/jstests
diff options
context:
space:
mode:
authorArun Banala <arun.banala@mongodb.com>2020-08-18 16:20:50 +0100
committerEvergreen Agent <no-reply@evergreen.mongodb.com>2020-09-11 08:37:43 +0000
commit8f7a5173f865ceb975d6a45add855146c5f052da (patch)
treec89d5df4f72d5bf567c10a459735d2229fbf6fe2 /jstests
parent6cf997c9313d2bd26ae1acf4141748c2cd6c092c (diff)
downloadmongo-8f7a5173f865ceb975d6a45add855146c5f052da.tar.gz
SERVER-48390 Exhaust pending calls when $group with $accumulator runs out of memory
(cherry picked from commit bf48331b0343b191c0d94aef888cdec471a6508b)
Diffstat (limited to 'jstests')
-rw-r--r--jstests/aggregation/accumulators/accumulator_js_size_limits.js42
1 file changed, 42 insertions, 0 deletions
diff --git a/jstests/aggregation/accumulators/accumulator_js_size_limits.js b/jstests/aggregation/accumulators/accumulator_js_size_limits.js
index cdf3680cf63..c0bed299703 100644
--- a/jstests/aggregation/accumulators/accumulator_js_size_limits.js
+++ b/jstests/aggregation/accumulators/accumulator_js_size_limits.js
@@ -108,4 +108,46 @@ res = runExample("$_id", {
lang: 'js',
});
assert.commandFailedWithCode(res, [ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed]);
+
+// Verify that having large number of documents doesn't cause the $accumulator to run out of memory.
+coll.drop();
+assert.commandWorked(coll.insert({groupBy: 1, largeField: "a".repeat(1000)}));
+assert.commandWorked(coll.insert({groupBy: 2, largeField: "a".repeat(1000)}));
+const largeAccumulator = {
+ $accumulator: {
+ init: function() {
+ return "";
+ },
+ accumulateArgs: [{fieldName: "$a"}],
+ accumulate: function(state, args) {
+ return state + "a";
+ },
+ merge: function(state1, state2) {
+ return state1 + state2;
+ },
+ finalize: function(state) {
+ return state.length;
+ }
+ }
+};
+res = coll.aggregate([
+ {$addFields: {a: {$range: [0, 1000000]}}},
+ {$unwind: "$a"}, // Create a number of documents to be executed by the accumulator.
+ {$group: {_id: "$groupBy", count: largeAccumulator}}
+ ])
+ .toArray();
+assert.sameMembers(res, [{_id: 1, count: 1000000}, {_id: 2, count: 1000000}]);
+
+// With $bucket.
+res =
+ coll.aggregate([
+ {$addFields: {a: {$range: [0, 1000000]}}},
+ {$unwind: "$a"}, // Create a number of documents to be executed by the accumulator.
+ {
+ $bucket:
+ {groupBy: "$groupBy", boundaries: [1, 2, 3], output: {count: largeAccumulator}}
+ }
+ ])
+ .toArray();
+assert.sameMembers(res, [{_id: 1, count: 1000000}, {_id: 2, count: 1000000}]);
})();