/**
* Test the autosplitter when a collection has very low cardinality
*
* @tags: [requires_fcv_44]
*/
(function() {
'use strict';
load('jstests/sharding/autosplit_include.js');
// Cluster with auto-splitting enabled and a 1 MB max chunk size, so that a
// handful of large documents is enough to trigger chunk splits.
var st = new ShardingTest({
name: "low_cardinality",
other: {enableAutoSplit: true, chunkSize: 1},
});
// Shard test.foo on {sk: 1}; every document with the same sk value lands in
// the same chunk, giving the collection very low shard-key cardinality.
assert.commandWorked(st.s.adminCommand({enablesharding: "test"}));
assert.commandWorked(st.s.adminCommand({shardcollection: "test.foo", key: {sk: 1}}));
const bigString = "X".repeat(1024 * 1024 / 4);  // 256 KiB filler per document
var coll = st.getDB("test").getCollection("foo");
// Bulk-inserts `numDocs` documents under shard key value `key`, each carrying
// a ~256 KiB filler string, then blocks until any in-progress chunk splits
// have completed.
function insertBigDocsWithKey(key, numDocs) {
let bulkOp = coll.initializeUnorderedBulkOp();
let docIdx = 0;
while (docIdx < numDocs) {
bulkOp.insert({sk: key, sub: docIdx, bs: bigString});
docIdx++;
}
assert.commandWorked(bulkOp.execute());
waitForOngoingChunkSplits(st);
}
// Returns the number of chunks currently making up test.foo, according to
// the config server's chunk metadata.
function numChunks() {
const chunkFilter = {"ns": "test.foo"};
return st.config.chunks.count(chunkFilter);
}
// Accumulate ~1MB of documents under -10 and +10
insertBigDocsWithKey(-10, 4);
insertBigDocsWithKey(10, 4);
waitForOngoingChunkSplits(st);
let expectedNumChunks = 2;
try {
// At least one split should have been performed
assert.gte(numChunks(),
expectedNumChunks,
"Number of chunks is less than 2, no split have been perfomed");
} catch (e) {
// (SERVER-59882) split may not have happened due to commit delay of the inserted documents
print("Retrying performing one insert after catching exception " + e);
insertBigDocsWithKey(10, 1);
waitForOngoingChunkSplits(st);
assert.gte(
numChunks(),
expectedNumChunks,
"Number of chunks is less than " + expectedNumChunks + ", no split has been perfomed");
}
expectedNumChunks++;
insertBigDocsWithKey(20, 4);
waitForOngoingChunkSplits(st);
// An additional split should have been performed
try {
assert.gte(numChunks(), expectedNumChunks, "Number of chunks must be at least 3");
} catch (e) {
// (SERVER-59882) split may not have happened due to commit delay of the inserted documents
print("Retrying performing one insert after catching exception " + e);
insertBigDocsWithKey(20, 1);
waitForOngoingChunkSplits(st);
assert.gte(
numChunks(),
expectedNumChunks,
"Number of chunks is less than " + 3 + ", not all expected splits have been perfomed");
}
st.stop();
})();