summaryrefslogtreecommitdiff
path: root/jstests/sharding
diff options
context:
space:
mode:
authorKaloian Manassiev <kaloian.manassiev@mongodb.com>2018-01-15 18:20:21 -0500
committerKaloian Manassiev <kaloian.manassiev@mongodb.com>2018-01-16 11:37:25 -0500
commitb5ebe8a5492c4f5e33970c0f885b9ac51460b9dc (patch)
treecd3c82c5e24c3840c807e53941a88bda6c5fe58e /jstests/sharding
parent07031843f1868a956b28f9e0e4d546cce4badfbf (diff)
downloadmongo-b5ebe8a5492c4f5e33970c0f885b9ac51460b9dc.tar.gz
SERVER-29423 Prevent the balancer policy from scheduling migrations with the same source or destination
Diffstat (limited to 'jstests/sharding')
-rw-r--r-- jstests/sharding/auto_rebalance_parallel.js | 98
1 file changed, 54 insertions, 44 deletions
diff --git a/jstests/sharding/auto_rebalance_parallel.js b/jstests/sharding/auto_rebalance_parallel.js
index 4971fc19f7f..c7078a6898a 100644
--- a/jstests/sharding/auto_rebalance_parallel.js
+++ b/jstests/sharding/auto_rebalance_parallel.js
@@ -1,61 +1,71 @@
/**
* Tests that the cluster is balanced in parallel in one balancer round (standalone).
*/
+
(function() {
'use strict';
var st = new ShardingTest({shards: 4});
+ var config = st.s0.getDB('config');
assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
st.ensurePrimaryShard('TestDB', st.shard0.shardName);
- assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.TestColl', key: {Key: 1}}));
-
- var coll = st.s0.getDB('TestDB').TestColl;
-
- // Create 4 chunks initially and ensure they get balanced within 1 balancer round
- assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
- assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
- assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
- assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
-
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 10}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 20}));
- assert.commandWorked(st.splitAt('TestDB.TestColl', {Key: 30}));
-
- // Move two of the chunks to shard0001 so we have option to do parallel balancing
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 20}, st.shard1.shardName));
- assert.commandWorked(st.moveChunk('TestDB.TestColl', {Key: 30}, st.shard1.shardName));
-
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: "TestDB.TestColl", shard: st.shard0.shardName})
- .itcount());
- assert.eq(2,
- st.s0.getDB('config')
- .chunks.find({ns: "TestDB.TestColl", shard: st.shard1.shardName})
- .itcount());
-
- // Do enable the balancer and wait for a single balancer round
+
+ function prepareCollectionForBalance(collName) {
+ assert.commandWorked(st.s0.adminCommand({shardCollection: collName, key: {Key: 1}}));
+
+ var coll = st.s0.getCollection(collName);
+
+ // Create 4 chunks initially and ensure they get balanced within 1 balancer round
+ assert.writeOK(coll.insert({Key: 1, Value: 'Test value 1'}));
+ assert.writeOK(coll.insert({Key: 10, Value: 'Test value 10'}));
+ assert.writeOK(coll.insert({Key: 20, Value: 'Test value 20'}));
+ assert.writeOK(coll.insert({Key: 30, Value: 'Test value 30'}));
+
+ assert.commandWorked(st.splitAt(collName, {Key: 10}));
+ assert.commandWorked(st.splitAt(collName, {Key: 20}));
+ assert.commandWorked(st.splitAt(collName, {Key: 30}));
+
+ // Move two of the chunks to shard0001 so we have option to do parallel balancing
+ assert.commandWorked(st.moveChunk(collName, {Key: 20}, st.shard1.shardName));
+ assert.commandWorked(st.moveChunk(collName, {Key: 30}, st.shard1.shardName));
+
+ assert.eq(2, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
+ assert.eq(2, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+ }
+
+ function checkCollectionBalanced(collName) {
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard0.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard1.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard2.shardName}).itcount());
+ assert.eq(1, config.chunks.find({ns: collName, shard: st.shard3.shardName}).itcount());
+ }
+
+ function countMoves(collName) {
+ return config.changelog.find({what: 'moveChunk.start', ns: collName}).itcount();
+ }
+
+ prepareCollectionForBalance('TestDB.TestColl1');
+ prepareCollectionForBalance('TestDB.TestColl2');
+
+ // Count the moveChunk start attempts accurately and ensure that only the correct number of
+ // migrations are scheduled
+ const testColl1InitialMoves = countMoves('TestDB.TestColl1');
+ const testColl2InitialMoves = countMoves('TestDB.TestColl2');
+
st.startBalancer();
st.awaitBalancerRound();
+ st.awaitBalancerRound();
st.stopBalancer();
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard0.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard1.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard2.shardName})
- .itcount());
- assert.eq(1,
- st.s0.getDB('config')
- .chunks.find({ns: 'TestDB.TestColl', shard: st.shard3.shardName})
- .itcount());
+ checkCollectionBalanced('TestDB.TestColl1');
+ checkCollectionBalanced('TestDB.TestColl2');
+
+ assert.eq(2, countMoves('TestDB.TestColl1') - testColl1InitialMoves);
+ assert.eq(2, countMoves('TestDB.TestColl2') - testColl2InitialMoves);
+
+ // Ensure there are no migration errors reported
+ assert.eq(0, config.changelog.find({what: 'moveChunk.error'}).itcount());
st.stop();
})();