summaryrefslogtreecommitdiff
path: root/jstests/sharding/sharding_balance2.js
diff options
context:
space:
mode:
Diffstat (limited to 'jstests/sharding/sharding_balance2.js')
-rw-r--r--  jstests/sharding/sharding_balance2.js  104
1 file changed, 52 insertions, 52 deletions
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index 7bba7e25bf3..697f3f5c0b0 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -2,68 +2,68 @@
* Test the maxSize setting for the addShard command.
*/
(function() {
'use strict';

// Size cap (in MB) applied to the second shard via addShard's maxSize option.
var MaxSizeMB = 1;

// Two shards, 1 MB chunk size; manualAddShard lets the test add each shard
// itself so it can attach the maxSize option to the second one.
var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
var db = s.getDB("test");

var names = s.getConnNames();
assert.eq(2, names.length);
assert.commandWorked(s.s0.adminCommand({addshard: names[0]}));
assert.commandWorked(s.s0.adminCommand({addshard: names[1], maxSize: MaxSizeMB}));
assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', names[0]);

// Build a ~10 KB filler string used to bulk up documents.
var bigString = "";
while (bigString.length < 10000)
    bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";

// Insert roughly 40 MB of documents into test.foo so sharding it later
// produces many 1 MB chunks.
var inserted = 0;
var num = 0;
var bulk = db.foo.initializeUnorderedBulkOp();
while (inserted < (40 * 1024 * 1024)) {
    bulk.insert({_id: num++, s: bigString});
    inserted += bigString.length;
}
assert.writeOK(bulk.execute());

assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
assert.gt(s.config.chunks.count({"ns": "test.foo"}), 10);

// Reported data size of a shard, taken from listDatabases' totalSize.
var getShardSize = function(conn) {
    var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
    return listDatabases.totalSize;
};

// Direct connection to the size-capped shard.
var shardConn = new Mongo(names[1]);

// Make sure that shard doesn't have any documents.
assert.eq(0, shardConn.getDB('test').foo.find().itcount());

var maxSizeBytes = MaxSizeMB * 1024 * 1024;

// Fill the shard with documents to exceed the max size so the balancer won't move
// chunks to this shard.
var localColl = shardConn.getDB('local').padding;
while (getShardSize(shardConn) < maxSizeBytes) {
    var localBulk = localColl.initializeUnorderedBulkOp();

    for (var x = 0; x < 20; x++) {
        localBulk.insert({x: x, val: bigString});
    }
    assert.writeOK(localBulk.execute());

    // Force the storage engine to flush files to disk so shardSize will get updated.
    assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
}

s.startBalancer();
s.awaitBalancerRound();

// The capped shard is over its maxSize, so after a balancer round it must
// still own zero chunks of test.foo.
var chunkCounts = s.chunkCounts('foo', 'test');
assert.eq(0, chunkCounts[s.rs1.name]);

s.stop();
})();