/*
 * Tests that the balancer splits the sessions collection and uniformly distributes the chunks
 * across shards in the cluster.
 * @tags: [resource_intensive]
 */
(function() {
"use strict";

/*
 * Returns the number of chunks for the sessions collection.
 */
function getNumTotalChunks() {
    return configDB.chunks.count({ns: kSessionsNs});
}

/*
 * Returns the number of chunks for the sessions collection that are on the given shard.
 */
function getNumChunksOnShard(shardName) {
    return configDB.chunks.count({ns: kSessionsNs, shard: shardName});
}

/*
 * Returns the number of docs in the sessions collection on the given host.
 */
function getNumDocs(conn) {
    return conn.getCollection(kSessionsNs).count();
}

/*
 * Starts a replica-set shard, adds the shard to the cluster, and increments numShards.
 * Returns the ReplSetTest object for the shard.
 */
function addShardToCluster() {
    const shardName = clusterName + "-rs" + numShards;
    const replTest = new ReplSetTest({name: shardName, nodes: 1});
    replTest.startSet({shardsvr: ""});
    replTest.initiate();
    assert.commandWorked(st.s.adminCommand({addShard: replTest.getURL(), name: shardName}));
    numShards++;
    return replTest;
}

/*
 * Removes the given shard from the cluster, waits until the removal state is "completed", and
 * decrements numShards.
 */
function removeShardFromCluster(shardName) {
    assert.commandWorked(st.s.adminCommand({removeShard: shardName}));

    // removeShard reports the draining progress; keep re-issuing it until the state is "completed".
    assert.soon(function() {
        const res = st.s.adminCommand({removeShard: shardName});
        assert.commandWorked(res);
        return ("completed" == res.state);
    }, "failed to remove shard " + shardName, kBalancerTimeoutMS);
    numShards--;
}

/*
 * Returns true if the chunks for the sessions collection are evenly distributed across the
 * given shards. That is, the number of chunks on the most loaded shard and on the least
 * loaded shard differs by no more than 1, and the least loaded shard owns the expected
 * minimum of floor(kExpectedNumChunks / number of shards) chunks.
 */
function isBalanced(shardNames) {
    const expectedMinNumChunksPerShard = Math.floor(kExpectedNumChunks / shardNames.length);
    let minNumChunks = Number.MAX_VALUE;
    let maxNumChunks = 0;
    for (const shardName of shardNames) {
        const numChunks = getNumChunksOnShard(shardName);
        minNumChunks = Math.min(numChunks, minNumChunks);
        maxNumChunks = Math.max(numChunks, maxNumChunks);
    }
    return (maxNumChunks - minNumChunks <= 1) && (minNumChunks == expectedMinNumChunksPerShard);
}

/*
 * Returns the standard deviation of the given numbers.
 */
function computeStdDev(nums) {
    const mean = nums.reduce((a, b) => a + b) / nums.length;
    return Math.sqrt(nums.map(x => Math.pow(x - mean, 2)).reduce((a, b) => a + b) / nums.length);
}

const kMinNumChunks = 100;
const kExpectedNumChunks = 128;  // the balancer rounds kMinNumChunks up to the next power of 2.
const kNumSessions = 10000;
const kBalancerTimeoutMS = 5 * 60 * 1000;

let numShards = 2;
const clusterName = jsTest.name();
const st = new ShardingTest({
    name: clusterName,
    shards: numShards,
    other: {configOptions: {setParameter: {minNumChunksForSessionsCollection: kMinNumChunks}}}
});

const kSessionsNs = "config.system.sessions";
const configDB = st.s.getDB("config");

// There is only one chunk initially.
assert.eq(1, getNumTotalChunks());

st.startBalancer();

jsTest.log(
    "Verify that the balancer splits the initial chunk and distributes chunks evenly across existing shards");
assert.soon(() => getNumTotalChunks() == kExpectedNumChunks,
            "balancer did not split the initial chunk for the sessions collection");
assert.soon(() => isBalanced([st.shard0.shardName, st.shard1.shardName]),
            "balancer did not distribute chunks evenly across existing shards",
            kBalancerTimeoutMS);

jsTest.log(
    "Verify that the balancer redistributes chunks when more shards are added to the cluster");
const shard2 = addShardToCluster();
const shard3 = addShardToCluster();
const shard4 = addShardToCluster();

assert.soon(
    () => isBalanced(
        [st.shard0.shardName, st.shard1.shardName, shard2.name, shard3.name, shard4.name]),
    "balancer did not redistribute chunks evenly after more shards were added",
    kBalancerTimeoutMS);
jsTest.log("Verify that the session docs are distributed almost evenly across shards");
// Start sessions and trigger a refresh to flush the sessions to the sessions collection.
for (let i = 0; i < kNumSessions; i++) {
assert.commandWorked(st.s.adminCommand({startSession: 1}));
}
assert.commandWorked(st.s.adminCommand({refreshLogicalSessionCacheNow: 1}));
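// Other components (e.g. the shell and internal operations) may start sessions of their own,
// so only assert a lower bound on the number of session documents.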
assert.lte(kNumSessions, getNumDocs(st.s));

const shards =
    [st.shard0, st.shard1, shard2.getPrimary(), shard3.getPrimary(), shard4.getPrimary()];
const numDocsOnShards = shards.map(shard => getNumDocs(shard));

// "Almost evenly" means the standard deviation of the per-shard document counts is less than
// 10% of the expected mean number of session documents per shard.
assert.lt(computeStdDev(numDocsOnShards), 0.1 * kNumSessions / shards.length);

jsTest.log(
    "Verify that the balancer redistributes chunks when shards are removed from the cluster");
removeShardFromCluster(shard2.name);

assert.soon(() => isBalanced([st.shard0.shardName, st.shard1.shardName, shard3.name, shard4.name]),
            "balancer did not redistribute chunks evenly after one of the shards was removed",
            kBalancerTimeoutMS);

// The removed shard should no longer own any chunks for the sessions collection.
assert.eq(0, getNumChunksOnShard(shard2.name));
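
// Clean up the cluster and the shards that were added manually.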
st.stopBalancer();
st.stop();
shard2.stopSet();
shard3.stopSet();
shard4.stopSet();
}());