path: root/jstests/noPassthroughWithMongod/mr_writeconflict.js
// SERVER-16262: Write conflicts during map-reduce operations.
//
// Several threads run mapReduce concurrently, all reducing their output into the same
// collection, and each command is expected to succeed despite the conflicting writes.

(function() {
"use strict";

load('jstests/libs/parallelTester.js');
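// parallelTester.js provides the Thread helper used to run the workload in parallel.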

// Builds a document with a random 'key' in [0, keyLimit) and 'value' in [0, valueLimit).
var makeDoc = function(keyLimit, valueLimit) {
    return {_id: ObjectId(), key: Random.randInt(keyLimit), value: Random.randInt(valueLimit)};
};

// Workload run by each thread: repeatedly mapReduce the source collection, reducing the
// output into the shared 'dest' collection.
var main = function() {
    // Emits the document's key with a sub-document counting one occurrence of its value.
    function mapper() {
        var obj = {};
        obj[this.value] = 1;
        emit(this.key, obj);
    }

    // Merges the per-value counts for a key. For example, reducing
    // [{"3": 1}, {"3": 2, "7": 1}] yields {"3": 3, "7": 1}.
    function reducer(key, values) {
        var res = {};

        values.forEach(function(obj) {
            Object.keys(obj).forEach(function(value) {
                if (!res.hasOwnProperty(value)) {
                    res[value] = 0;
                }
                res[value] += obj[value];
            });
        });

        return res;
    }

    for (var i = 0; i < 10; i++) {
        // Have all threads combine their results into the same collection
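        // With {out: {reduce: 'dest'}}, new results are reduced together with the documents
        // already in 'dest', so concurrent threads keep updating the same output documents.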
        var res = db.source.mapReduce(mapper, reducer, {out: {reduce: 'dest'}});
        assert.commandWorked(res);
    }
};

Random.setRandomSeed();

// Seed the source collection with random documents that share a small set of keys and values.
var numDocs = 200;
var bulk = db.source.initializeUnorderedBulkOp();
var i;
for (i = 0; i < numDocs; ++i) {
    var doc = makeDoc(numDocs / 100, numDocs / 10);
    bulk.insert(doc);
}

var res = bulk.execute();
assert.commandWorked(res);
assert.eq(numDocs, res.nInserted);

// Start with a freshly created, empty output collection.
db.dest.drop();
assert.commandWorked(db.createCollection('dest'));

// Spawn worker threads; the main shell runs the same workload as the final worker.
var numThreads = 6;
var t = [];
for (i = 0; i < numThreads - 1; ++i) {
    t[i] = new Thread(main);
    t[i].start();
}

main();
for (i = 0; i < numThreads - 1; ++i) {
    t[i].join();
}
}());