summaryrefslogtreecommitdiff
path: root/jstests/sharding/bulk_shard_insert.js
blob: a8bea4e3fc83ba64cbbd96a191aa0d35fbe19d77 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
/**
 * Test bulk inserts running alonside the auto-balancer. Ensures that they do not conflict with each
 * other.
 *
 * This test is labeled resource intensive because its total io_write is 106MB compared to a median
 * of 5MB across all sharding tests in wiredTiger.
 * @tags: [resource_intensive]
 */
(function() {
'use strict';

var st = new ShardingTest({shards: 4, chunkSize: 1});

// Double the balancer interval to produce fewer migrations per unit time so that the test does not
// run out of stale shard version retries.
st._configServers.forEach((conn) => {
    conn.adminCommand({
        configureFailPoint: 'overrideBalanceRoundInterval',
        mode: 'alwaysOn',
        data: {intervalMs: 2000}
    });
});

assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
st.ensurePrimaryShard('TestDB', st.shard0.shardName);
assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Counter: 1}}));

var db = st.s0.getDB('TestDB');
var coll = db.TestColl;

// Insert lots of bulk documents
var numDocs = 1000000;

var bulkSize = 4000;
var docSize = 128; /* bytes */
print("\n\n\nBulk size is " + bulkSize);

var data = "x";
while (Object.bsonsize({x: data}) < docSize) {
    data += data;
}

print("\n\n\nDocument size is " + Object.bsonsize({x: data}));

var docsInserted = 0;
var balancerOn = false;

/**
 * Ensures that the just inserted documents can be found.
 *
 * On a count mismatch, sorts the found documents by Counter to locate the first missing value,
 * dumps the sharding status for diagnosis and fails the test with that information.
 */
function checkDocuments() {
    var docsFound = coll.find({}, {_id: 0, Counter: 1}).toArray();
    var count = coll.find().count();

    if (docsFound.length != docsInserted) {
        print("Inserted " + docsInserted + " count : " + count +
              " doc count : " + docsFound.length);

        var allFoundDocsSorted = docsFound.sort(function(a, b) {
            return a.Counter - b.Counter;
        });

        var missingValueInfo;

        for (var i = 0; i < docsInserted; i++) {
            // Guard against running off the end of a shorter-than-expected result set; without
            // this, the access below would throw a TypeError on undefined and mask the
            // diagnostic assert message at the bottom.
            if (i >= allFoundDocsSorted.length) {
                missingValueInfo = {expected: i, actual: undefined};
                break;
            }
            if (i != allFoundDocsSorted[i].Counter) {
                missingValueInfo = {expected: i, actual: allFoundDocsSorted[i].Counter};
                break;
            }
        }

        st.printShardingStatus();

        assert(
            false,
            'Inserted number of documents does not match the actual: ' + tojson(missingValueInfo));
    }
}

while (docsInserted < numDocs) {
    var currBulkSize = (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);

    var bulk = [];
    for (var i = 0; i < currBulkSize; i++) {
        bulk.push({Counter: docsInserted, hi: "there", i: i, x: data});
        docsInserted++;
    }

    assert.commandWorked(coll.insert(bulk));

    if (docsInserted % 10000 == 0) {
        print("Inserted " + docsInserted + " documents.");
        st.printShardingStatus();
    }

    if (docsInserted > numDocs / 3 && !balancerOn) {
        // Do one check before we turn balancer on
        checkDocuments();
        print('Turning on balancer after ' + docsInserted + ' documents inserted.');
        st.startBalancer();
        balancerOn = true;
    }
}

checkDocuments();

st.stop();
})();