1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
|
/**
 * Tests that the balancer can migrate chunks of a collection whose documents
 * are very large (each just under the 16MB BSON limit), and that the chunk
 * distribution across the two shards eventually evens out.
 */
(function() {
    "use strict";

    var st =
        new ShardingTest({name: 'migrateBig_balancer', shards: 2, other: {enableBalancer: true}});
    var mongos = st.s;
    var admin = mongos.getDB("admin");
    var db = mongos.getDB("test");
    var coll = db.getCollection("stuff");

    assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
    st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');

    // Build a ~64KB string by doubling "x" sixteen times (2^16 characters).
    var data = "x";
    var nsq = 16;
    var n = 255;
    for (var i = 0; i < nsq; i++)
        data += data;

    // 255 copies of the 64KB string make a single document just under the
    // 16MB BSON document size limit.
    var dataObj = {};
    for (var i = 0; i < n; i++)
        dataObj["data-" + i] = data;

    // Insert 40 such near-limit documents so there is plenty of data for the
    // balancer to move around.
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 40; i++) {
        bulk.insert({data: dataObj});
    }
    assert.writeOK(bulk.execute());
    assert.eq(40, coll.count(), "prep1");

    printjson(coll.stats());
    admin.printShardingStatus();

    // Fix: the result of shardcollection was previously ignored; a failure
    // here must abort the test immediately instead of surfacing later as a
    // confusing "not enough chunks" assertion.
    assert.commandWorked(admin.runCommand({shardcollection: "" + coll, key: {_id: 1}}));

    assert.lt(
        5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");

    // Wait (up to 10 minutes, polling every second) for the balancer to
    // spread the chunks so the per-shard counts differ by at most 3.
    assert.soon(function() {
        var res = mongos.getDB("config").chunks.group({
            cond: {ns: "test.stuff"},
            key: {shard: 1},
            reduce: function(doc, out) {
                out.nChunks++;
            },
            initial: {nChunks: 0}
        });
        printjson(res);
        return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
    }, "never migrated", 10 * 60 * 1000, 1000);

    st.stop();
})();
|