summaryrefslogtreecommitdiff
path: root/jstests/sharding/delete_during_migrate.js
blob: 982b0c00787ba09c64cce8b1a230195f1366f5e9 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
// Test migrating a big chunk while deletions are happening within that chunk.
// Test is slightly non-deterministic, since removes could happen before migrate
// starts. Protect against that by making chunk very large.

// Start up a new sharded cluster. The balancer is stopped by default, which
// gives us manual control over when the migration happens.
var st = new ShardingTest({shards: 2, mongos: 1});

var dbname = "testDB";
var coll = "foo";
var ns = dbname + "." + coll;
var s = st.s0;
var t = s.getDB(dbname).getCollection(coll);

// Fail fast if sharding setup does not succeed, rather than getting a
// confusing error later out of moveChunk.
assert.commandWorked(s.adminCommand({enablesharding: dbname}));
st.ensurePrimaryShard(dbname, 'shard0001');

// Create a fresh collection with lots of docs, so the migration takes long
// enough to overlap with the concurrent removes.
t.drop();
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 200000; i++) {
    bulk.insert({a: i});
}
assert.writeOK(bulk.execute());

// Enable sharding of the collection. Only 1 chunk.
assert.commandWorked(t.ensureIndex({a: 1}));
assert.commandWorked(s.adminCommand({shardcollection: ns, key: {a: 1}}));

// Start a parallel shell that deletes things while the chunk is migrating.
// Keep the pid so we can join the process before tearing the cluster down.
var deleterPid = startMongoProgramNoConnect("mongo",
                                            "--host",
                                            getHostName(),
                                            "--port",
                                            st.s0.port,
                                            "--eval",
                                            "db." + coll + ".remove({});",
                                            dbname);

// Migrate while deletions are happening; commandWorked reports the full
// server response on failure, unlike a bare ok-flag assert.
assert.commandWorked(
    s.adminCommand(
        {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name}),
    "migration didn't work while doing deletes");

// Wait for the deleting shell to exit so the spawned process does not
// outlive the test or race with cluster teardown.
waitProgram(deleterPid);

st.stop();