'use strict';

/**
 * map_reduce_with_chunk_migrations.js
 *
 * This test exercises mapReduce on a collection during concurrent chunk migrations. If extending
 * this workload, consider overriding the following:
 *
 * $config.data.collWithMigrations: collection to run chunk migrations against (default is the
 * input collection of the mapReduce).
 * $config.states.mapReduce: function to execute the mapReduce.
 *
 * @tags: [
 *  requires_sharding, assumes_balancer_off,
 *  assumes_autosplit_off,
 *  requires_non_retryable_writes,
 *  # mapReduce does not support afterClusterTime.
 *  does_not_support_causal_consistency
 * ]
 */
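/*
 * For illustration, a minimal sketch of an extending workload that runs the chunk migrations
 * against a second collection. This is only a sketch: the collection name
 * "map_reduce_with_chunk_migrations_other" below is a hypothetical example, not part of this
 * test suite.
 *
 *   load('jstests/concurrency/fsm_libs/extend_workload.js');  // for extendWorkload
 *   load('jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js');  // for $config
 *
 *   var $config = extendWorkload($config, function($config, $super) {
 *       // Migrate chunks of a separate collection rather than the mapReduce input collection.
 *       // setup() and init() below will shard and populate it like the input collection.
 *       $config.data.collWithMigrations = "map_reduce_with_chunk_migrations_other";
 *       return $config;
 *   });
 */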
load('jstests/concurrency/fsm_libs/extend_workload.js');                     // for extendWorkload
load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js');  // for $config

var $config = extendWorkload($config, function($config, $super) {
    // The base setup will insert 'partitionSize' documents per thread, evenly distributed
    // across the chunks. Documents will only have the "_id" field.
    $config.data.partitionSize = 50;
    $config.threadCount = 2;
    $config.iterations = 100;
    $config.data.numDocs = $config.data.partitionSize * $config.threadCount;

    // By default, the collection targeted by the concurrent chunk migrations is the one that the
    // mapReduce is run against.
    $config.data.collWithMigrations = $config.collName;

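    // After init, each thread repeatedly runs the mapReduce state, interleaving a chunk migration
    // roughly 20% of the time.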
    $config.transitions = {
        init: {mapReduce: 1},
        mapReduce: {
            moveChunk: 0.2,
            mapReduce: 0.8,
        },
        moveChunk: {mapReduce: 1},
    };

    /**
     * Moves a random chunk in the target collection.
     */
    $config.states.moveChunk = function moveChunk(db, collName, connCache) {
        $super.states.moveChunk.apply(this, [db, this.collWithMigrations, connCache]);
    };

    /**
     * Executes a mapReduce with output mode "replace".
     */
    $config.states.mapReduce = function mapReduce(db, collName, connCache) {
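        // Each input document emits its unique _id with a count of 1, so the "replace" output
        // should contain exactly 'numDocs' documents regardless of any concurrent chunk
        // migrations.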
        const map = function() {
            emit(this._id, 1);
        };
        const reduce = function(k, values) {
            return Array.sum(values);
        };

        const res = db[collName].mapReduce(map, reduce, {out: {replace: this.resultsCollection}});
        assertWhenOwnColl.commandWorked(res);

        assert.eq(this.numDocs, db[this.resultsCollection].find().itcount());
    };

    /**
     * Uses the base class init() to initialize this thread for both collections.
     */
    $config.states.init = function init(db, collName, connCache) {
        $super.states.init.apply(this, [db, collName, connCache]);

        // Initialize the target collection in a similar manner if it is different from the
        // default collection.
        if (collName != this.collWithMigrations) {
            $super.states.init.apply(this, [db, this.collWithMigrations, connCache]);
        }

        // Use a unique target collection name per thread to avoid collisions during the final
        // rename step of the mapReduce.
        this.resultsCollection = "map_reduce_with_chunk_migrations_out_" + this.tid;
    };

    /**
     * Initializes the mapReduce input collection and the target collection for chunk migrations
     * as sharded, with an even distribution across each thread ID.
     */
    $config.setup = function setup(db, collName, cluster) {
        $super.setup.apply(this, [db, collName, cluster]);

        if (collName != this.collWithMigrations) {
            // Set up the target collection in a similar manner. Note that the FSM infrastructure
            // will have already enabled sharding on collName, but we need to do it manually for
            // the collection targeted by the chunk migrations.
            cluster.shardCollection(db[this.collWithMigrations], this.shardKey, false);
            $super.setup.apply(this, [db, this.collWithMigrations, cluster]);
        }
    };

    return $config;
});