summaryrefslogtreecommitdiff
path: root/jstests/sharding/large_chunk.js
blob: 786ac576a0d11d672aeb8ff2f3249d8772a37aaa (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
/**
 * Where we test operations dealing with large chunks
 *
 * This test is labeled resource intensive because its total io_write is 220MB compared to a median
 * of 5MB across all sharding tests in wiredTiger. Its total io_write is 1160MB compared to a median
 * of 135MB in mmapv1.
 * @tags: [resource_intensive]
 */
(function() {
    'use strict';

    // Bring up a two-shard cluster with the chunk size pinned at 1GB, the
    // highest value the server accepts.
    // Note that early splitting will start with a 1/4 of max size currently.
    var st = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
    var testDB = st.getDB("test");

    //
    // Step 1 - Test moving a large chunk
    //

    // Enable sharding for the 'test' database and pin its primary shard so the
    // source of the later migration is deterministic.
    assert.commandWorked(st.s0.adminCommand({enablesharding: "test"}));
    st.ensurePrimaryShard('test', st.shard1.shardName);

    // Build a ~10KB filler string by repeated appending of a fixed fragment.
    var filler = "";
    for (; filler.length < 10000;) {
        filler += "asdasdasdasdadasdasdasdasdasdasdasdasda";
    }

    // Load roughly 400MB of documents so the collection forms one huge chunk.
    var docId = 0;
    var bulk = testDB.foo.initializeUnorderedBulkOp();
    for (var bytesInserted = 0; bytesInserted < (400 * 1024 * 1024);
         bytesInserted += filler.length) {
        bulk.insert({_id: docId++, s: filler});
    }
    assert.writeOK(bulk.execute());

    assert.commandWorked(st.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));

    // With a 1GB chunk size, all of the data should still sit in a single chunk.
    assert.eq(1, st.config.chunks.count({"ns": "test.foo"}), "step 1 - need one large chunk");

    var primary = st.getPrimaryShard("test").getDB("test");
    var secondary = st.getOther(primary).getDB("test");

    // Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
    // size
    print("Checkpoint 1a");
    var migrationCap = 200 * 1024 * 1024;
    assert.throws(function() {
        st.adminCommand({
            movechunk: "test.foo",
            find: {_id: 1},
            to: secondary.getMongo().name,
            maxChunkSizeBytes: migrationCap
        });
    });

    // Without the explicit size cap the migration should succeed.
    print("checkpoint 1b");
    var chunksBefore = st.config.chunks.find({ns: 'test.foo'}).toArray();
    assert.commandWorked(
        st.s0.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name}));

    // The chunk's owning shard must have changed as a result of the move.
    var chunksAfter = st.config.chunks.find({ns: 'test.foo'}).toArray();
    assert.neq(chunksBefore[0].shard, chunksAfter[0].shard, "move chunk did not work");

    st.config.changelog.find().forEach(printjson);

    st.stop();
})();