/**
* This test reproduces the error reported in HELP-22995. It creates a jumbo chunk with documents
* that are close to the 16MB document size limit to force the batching code in move chunk to
* consider adding them together in a batch. It ensures that the proper document size is considered
* and that we can still migrate when calling removeShard.
*
* @tags: [
* does_not_support_stepdowns,
* multiversion_incompatible,
* ]
*/
(function() {
'use strict';
// TODO SERVER-50144 Remove this and allow orphan checking.
// This test calls removeShard which can leave docs in config.rangeDeletions in state "pending",
// therefore preventing orphans from being cleaned up.
TestData.skipCheckOrphans = true;
const dbName = "test";
const collName = "user";
const ns = dbName + "." + collName;
const shardKeys = [-1, 1];
// This number is chosen so that the chunks are considered 'large' as defined by
// the MigrationChunkClonerSourceLegacy class. Currently, that class considers chunks containing
// more than the following number of documents as 'large':
// (13/10) * MaxChunkSize / avgRecSize (MaxChunkSize is 64MB by default)
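// With ~16MB documents and the default 64MB max chunk size, that threshold works out to roughly
// 5 documents, so 10 documents per chunk comfortably exceeds it.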
const numDocs = 10;
// Size is slightly under the 16MB document size limit. This ensures that any two documents must
// be in separate batches when cloning.
const bigDocSize = 16 * 1024 * 1024 - 4096;
const bigDocPayload = "x".repeat(bigDocSize);
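
// Two shards: each will own one chunk of the collection, and shard1 will later be drained via
// removeShard.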
let st = new ShardingTest({shards: 2});
assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
st.ensurePrimaryShard(dbName, st.shard0.shardName);
jsTest.log("Sharding collection with one chunk on each shard.");
assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}}));
assert.commandWorked(st.s.adminCommand({split: ns, middle: {x: 0}}));
assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 1}, to: st.shard1.shardName}));
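
// Issues removeShard and polls until draining completes, clearing the chunk's jumbo flag between
// attempts so a failed migration does not leave the chunk permanently marked jumbo.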
function removeShardAndWait(shardName) {
    const removeShardCmd = {removeShard: shardName};
    const res = st.s.adminCommand(removeShardCmd);

    assert.commandWorked(res);
    assert(res.state === "started");

    assert.soon(function() {
        let res = st.s.adminCommand(removeShardCmd);
        if (res.state === "completed") {
            return true;
        } else {
            jsTest.log("Still waiting for shard removal to complete:");
            printjson(res);
            assert.commandWorked(st.s.adminCommand({clearJumboFlag: ns, find: {"x": 1}}));
            return false;
        }
    });

    jsTest.log("Shard removal complete.");
}
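
// Verifies that every inserted document ended up on shard0 with its full payload intact.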
function assertDocsExist(shardKeys, numDocs, payloadSize) {
    shardKeys.forEach(key => {
        for (let i = 0; i < numDocs; i++) {
            let db = st.rs0.getPrimary().getDB(dbName);
            let query = {x: key, seq: i};
            let doc = db.getCollection(collName).findOne(query);
            assert(doc);

            let payload = doc.data;
            assert.eq(payload.length,
                      payloadSize,
                      tojson(query) + " does not have the expected payload length of " +
                          payloadSize + " bytes");
        }
    });
}
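
// Each chunk holds numDocs documents of ~16MB each (~160MB total), so it qualifies as 'large'
// and the cloner must send each document in its own batch.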
jsTest.log("Insert " + numDocs + " documents with " + bigDocSize + " bytes each.");
shardKeys.forEach(key => {
    for (let i = 0; i < numDocs; i++) {
        let doc = {x: key, seq: i, data: bigDocPayload};
        assert.commandWorked(st.s.getCollection(ns).insert(doc));
    }
});
// Allow the balancer to move jumbo chunks, then start it so chunks can drain off the shard being
// removed.
assert.commandWorked(st.s.getDB("config").settings.update(
    {_id: "balancer"}, {$set: {attemptToBalanceJumboChunks: true}}, true /* upsert */));
st.startBalancer();
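
// Drain shard1; its chunk of near-16MB documents must migrate to shard0.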
removeShardAndWait(st.shard1.shardName);
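// After the drain, shard0 should hold every document with its payload unchanged.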
assertDocsExist(shardKeys, numDocs, bigDocSize);
st.stop();
})();