summaryrefslogtreecommitdiff
path: root/jstests/noPassthroughWithMongod/sharding_migrate_large_docs.js
blob: 548c80a6db2bda72e4d30a0f5f0feb5ce8d3c0d1 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
//
// Tests that chunk migration and the mongos query path correctly handle
// documents near the 16MB user BSON size limit.
// @tags: [requires_sharding]
//

var st = new ShardingTest({shards: 2, mongos: 1});

var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
var admin = mongos.getDB("admin");
var shards = mongos.getCollection("config.shards").find().toArray();

// Shard the collection on _id and split at 0 so we have one chunk per sign.
assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
// movePrimary may legitimately fail (e.g. primary already on shard 0), so only log it.
printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));

jsTest.log("Preparing large insert...");

// Build a 1MB string by repeated doubling.
var data1MB = "x";
while (data1MB.length < 1024 * 1024)
    data1MB += data1MB;

// 15MB string.
var data15MB = "";
for (var i = 0; i < 15; i++)
    data15MB += data1MB;

// Just under 16MB, so that {_id: ..., d: data15PlusMB} stays within the
// 16MB user BSON size limit once the document overhead is added.
var data15PlusMB = data15MB;
for (var i = 0; i < 1023 * 1024; i++)
    data15PlusMB += "x";

print("~15MB object size is : " + Object.bsonsize({_id: 0, d: data15PlusMB}));

jsTest.log("Inserting docs of large and small sizes...");

// Two large docs next to each other (both in the {_id < 0} chunk).
assert.commandWorked(coll.insert({_id: -2, d: data15PlusMB}));
assert.commandWorked(coll.insert({_id: -1, d: data15PlusMB}));

// Docs of assorted sizes interleaved in the {_id >= 0} chunk.
assert.commandWorked(coll.insert({_id: 0, d: "x"}));
assert.commandWorked(coll.insert({_id: 1, d: data15PlusMB}));
assert.commandWorked(coll.insert({_id: 2, d: "x"}));
assert.commandWorked(coll.insert({_id: 3, d: data15MB}));
assert.commandWorked(coll.insert({_id: 4, d: "x"}));
assert.commandWorked(coll.insert({_id: 5, d: data1MB}));
assert.commandWorked(coll.insert({_id: 6, d: "x"}));

assert.eq(9, coll.find().itcount());

jsTest.log("Starting migration...");

// Migrate both chunks (and their large documents) to the other shard.
assert.commandWorked(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
assert.commandWorked(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: shards[1]._id}));

// Ensure that the doc count is correct and that the mongos query path can handle docs near the 16MB
// user BSON size limit.
assert.eq(9, coll.find().itcount());

jsTest.log("DONE!");

st.stop();