author Jonathan Abrahams <jonathan@mongodb.com> 2016-03-09 12:17:50 -0500
committer Jonathan Abrahams <jonathan@mongodb.com> 2016-03-09 12:18:14 -0500
commit 4ae691e8edc87d0e3cfb633bb91c328426be007b (patch)
tree 52079a593f54382ca13a2e741633eab1b6271893 /jstests/sharding/sharding_balance2.js
parent a025d43f3ce2efc1fb1282a718f5d286fa0a4dc1 (diff)
SERVER-22468 Format JS code with approved style in jstests/
Diffstat (limited to 'jstests/sharding/sharding_balance2.js')
-rw-r--r-- jstests/sharding/sharding_balance2.js | 101
1 file changed, 50 insertions, 51 deletions
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index e7ad317e0f5..37c84ed8ded 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -3,74 +3,73 @@
*/
(function() {
-"use strict";
+ "use strict";
-var MaxSizeMB = 1;
+ var MaxSizeMB = 1;
-var s = new ShardingTest({ shards: 2, other: { chunkSize: 1, manualAddShard: true }});
-var db = s.getDB( "test" );
-s.stopBalancer();
+ var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
+ var db = s.getDB("test");
+ s.stopBalancer();
-var names = s.getConnNames();
-assert.eq(2, names.length);
-s.adminCommand({ addshard: names[0] });
-s.adminCommand({ addshard: names[1], maxSize: MaxSizeMB });
+ var names = s.getConnNames();
+ assert.eq(2, names.length);
+ s.adminCommand({addshard: names[0]});
+ s.adminCommand({addshard: names[1], maxSize: MaxSizeMB});
-s.adminCommand({ enablesharding: "test" });
-var res = db.adminCommand({ movePrimary: 'test', to: names[0] });
-assert(res.ok || res.errmsg == "it is already the primary");
+ s.adminCommand({enablesharding: "test"});
+ var res = db.adminCommand({movePrimary: 'test', to: names[0]});
+ assert(res.ok || res.errmsg == "it is already the primary");
-var bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ var bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-var inserted = 0;
-var num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 40 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.gt(s.config.chunks.count(), 10);
+ var inserted = 0;
+ var num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.gt(s.config.chunks.count(), 10);
-var getShardSize = function(conn) {
- var listDatabases = conn.getDB('admin').runCommand({ listDatabases: 1 });
- return listDatabases.totalSize;
-};
+ var getShardSize = function(conn) {
+ var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
+ return listDatabases.totalSize;
+ };
-var shardConn = new Mongo(names[1]);
+ var shardConn = new Mongo(names[1]);
-// Make sure that shard doesn't have any documents.
-assert.eq(0, shardConn.getDB('test').foo.find().itcount());
+ // Make sure that shard doesn't have any documents.
+ assert.eq(0, shardConn.getDB('test').foo.find().itcount());
-var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+ var maxSizeBytes = MaxSizeMB * 1024 * 1024;
-// Fill the shard with documents to exceed the max size so the balancer won't move
-// chunks to this shard.
-var localColl = shardConn.getDB('local').padding;
-while (getShardSize(shardConn) < maxSizeBytes) {
- var localBulk = localColl.initializeUnorderedBulkOp();
+ // Fill the shard with documents to exceed the max size so the balancer won't move
+ // chunks to this shard.
+ var localColl = shardConn.getDB('local').padding;
+ while (getShardSize(shardConn) < maxSizeBytes) {
+ var localBulk = localColl.initializeUnorderedBulkOp();
- for (var x = 0; x < 20; x++) {
- localBulk.insert({ x: x, val: bigString });
+ for (var x = 0; x < 20; x++) {
+ localBulk.insert({x: x, val: bigString});
+ }
+ assert.writeOK(localBulk.execute());
+ // Force the storage engine to flush files to disk so shardSize will get updated.
+ assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
}
- assert.writeOK(localBulk.execute());
-
- // Force the storage engine to flush files to disk so shardSize will get updated.
- assert.commandWorked(shardConn.getDB('admin').runCommand({ fsync: 1 }));
-}
-s.startBalancer();
+ s.startBalancer();
-// Wait until balancer finishes at least one balancing round.
-assert(s.waitForBalancerRound(), "Balancer is not running: it never pinged config.mongos");
+ // Wait until balancer finishes at least one balancing round.
+ assert(s.waitForBalancerRound(), "Balancer is not running: it never pinged config.mongos");
-var chunkCounts = s.chunkCounts('foo', 'test');
-assert.eq(0, chunkCounts.shard0001);
+ var chunkCounts = s.chunkCounts('foo', 'test');
+ assert.eq(0, chunkCounts.shard0001);
-s.stop();
+ s.stop();
})();