summaryrefslogtreecommitdiff
path: root/jstests/sharding/delete_during_migrate.js
blob: 04c3075b1f143f5e5369642b53a9c40855462be8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
/**
 * Test migrating a big chunk while deletions are happening within that chunk. Test is slightly
 * non-deterministic, since removes could happen before migrate starts. Protect against that by
 * making chunk very large.
 *
 * This test is labeled resource intensive because its total io_write is 88MB compared to a median
 * of 5MB across all sharding tests in wiredTiger.
 * @tags: [resource_intensive]
 */
(function() {
    'use strict';

    const st = new ShardingTest({shards: 2, mongos: 1});

    const dbname = "test";
    const coll = "foo";
    const ns = dbname + "." + coll;

    assert.commandWorked(st.s0.adminCommand({enablesharding: dbname}));
    st.ensurePrimaryShard(dbname, st.shard1.shardName);

    const t = st.s0.getDB(dbname).getCollection(coll);

    // Insert enough documents that the chunk is large, so the migration is still in
    // progress while the parallel deletions run (see header comment on non-determinism).
    const bulk = t.initializeUnorderedBulkOp();
    for (let i = 0; i < 200000; i++) {
        bulk.insert({a: i});
    }
    assert.commandWorked(bulk.execute());

    // Enable sharding of the collection. Only 1 chunk. Check the index build succeeded
    // rather than silently ignoring a failure.
    assert.commandWorked(t.createIndex({a: 1}));

    assert.commandWorked(st.s0.adminCommand({shardcollection: ns, key: {a: 1}}));

    // Start a parallel shell that deletes things.
    const join = startParallelShell("db." + coll + ".remove({});", st.s0.port);

    // Migrate while deletions are happening.
    assert.commandWorked(st.s0.adminCommand(
        {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}));

    join();

    st.stop();
})();