path: root/jstests/core/mr_bigobject.js
// Confirms that the mapReduce reduce function will process data sets larger than 16MB.
// @tags: [
//   # mapReduce does not support afterClusterTime.
//   does_not_support_causal_consistency,
//   does_not_support_stepdowns,
//   requires_fastcount,
//   uses_map_reduce_with_temp_collections,
// ]
(function() {
"use strict";
const coll = db.mr_bigobject;
coll.drop();
const outputColl = db.mr_bigobject_out;
outputColl.drop();

const largeString = "a".repeat(6 * 1024 * 1024);
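// Five copies of this 6MB string will be emitted under a single key, so the
// reduce phase sees roughly 30MB of values, well past the 16MB BSON limit.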

const bulk = coll.initializeUnorderedBulkOp();
for (let i = 0; i < 5; i++)
    bulk.insert({_id: i, s: largeString});
assert.commandWorked(bulk.execute());

// MapReduce succeeds when the reduce function processes single-key data sets larger than 16MB.
const mapFn = function() {
    emit(1, this.s);
};

let reduceFn = function(k, v) {
    return 1;
};
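// Returning a constant is safe even though the server may invoke reduce
// several times per key on partial results: the final value is always 1.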

assert.commandWorked(coll.mapReduce(mapFn, reduceFn, {out: {"merge": outputColl.getName()}}));
assert.eq([{_id: 1, value: 1}], outputColl.find().toArray());

// The reduce function processes the expected amount of data.
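// Reduce runs repeatedly over chunks of values, so `v` can mix raw strings
// with numeric totals from earlier reduce passes; handle both.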
reduceFn = function(k, v) {
    let total = 0;
    for (let i = 0; i < v.length; i++) {
        const x = v[i];
        if (typeof x == "number")
            total += x;
        else
            total += x.length;
    }
    return total;
};

assert.commandWorked(coll.mapReduce(mapFn, reduceFn, {out: {"merge": outputColl.getName()}}));
assert.eq([{_id: 1, value: coll.count() * largeString.length}], outputColl.find().toArray());
}());