author     Mike Grundy <michael.grundy@10gen.com>    2016-02-05 15:13:45 -0500
committer  Mike Grundy <michael.grundy@10gen.com>    2016-02-09 14:46:30 -0500
commit     fb46f0112723f46d31b04c84aeb8aa6a3b08aa1f (patch)
tree       85420b8810389c584c00a51510a4a6fb6222ee0f
parent     e0c067b5f7a10308c5a52f2dbb662e7cfdb41e1e (diff)
download   mongo-fb46f0112723f46d31b04c84aeb8aa6a3b08aa1f.tar.gz
SERVER-22341 fix jslint errors in jstests/sharding with eslint --fix
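As the commit title records, every file under jstests/sharding was rewritten in place by ESLint's autofixer, so the diff below consists almost entirely of mechanical fixes such as adding missing statement-terminating semicolons. As a rough illustration only (the repository's actual ESLint configuration is not part of this commit, and the "semi" rule shown here is an assumption), the same kind of pass can be driven from Node through ESLint's CLIEngine API rather than from the command line:

    // Hypothetical sketch of an "eslint --fix" pass over the sharding tests.
    // The rule set below is an assumed stand-in for the project's real config.
    var CLIEngine = require("eslint").CLIEngine;

    var cli = new CLIEngine({
        fix: true,                        // apply autofixes instead of only reporting
        useEslintrc: false,               // ignore any on-disk config for this sketch
        rules: { semi: [2, "always"] }    // 2 = error; report (and fix) missing semicolons
    });

    var report = cli.executeOnFiles(["jstests/sharding/"]);
    CLIEngine.outputFixes(report);        // write the fixed sources back to disk
    console.log("problems remaining after fixes: " + report.errorCount);

The command-line invocation named in the title (eslint --fix) is the CLI front end to this same engine; either form produces the in-place rewrites summarized in the file list below.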
-rw-r--r--  jstests/sharding/SERVER-7379.js | 8
-rw-r--r--  jstests/sharding/all_config_hosts_down.js | 14
-rw-r--r--  jstests/sharding/array_shard_key.js | 60
-rw-r--r--  jstests/sharding/auth.js | 16
-rw-r--r--  jstests/sharding/auth2.js | 2
-rw-r--r--  jstests/sharding/authCommands.js | 34
-rw-r--r--  jstests/sharding/authConnectionHook.js | 2
-rw-r--r--  jstests/sharding/auth_add_shard.js | 2
-rw-r--r--  jstests/sharding/auth_copydb.js | 2
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js | 2
-rw-r--r--  jstests/sharding/authmr.js | 4
-rw-r--r--  jstests/sharding/authwhere.js | 4
-rw-r--r--  jstests/sharding/auto1.js | 24
-rw-r--r--  jstests/sharding/auto2.js | 18
-rw-r--r--  jstests/sharding/balance_repl.js | 2
-rw-r--r--  jstests/sharding/bouncing_count.js | 46
-rw-r--r--  jstests/sharding/bulk_insert.js | 54
-rw-r--r--  jstests/sharding/bulk_shard_insert.js | 56
-rw-r--r--  jstests/sharding/cleanup_orphaned_auth.js | 2
-rw-r--r--  jstests/sharding/cleanup_orphaned_basic.js | 2
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js | 2
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js | 2
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js | 2
-rw-r--r--  jstests/sharding/coll_epoch_test0.js | 34
-rw-r--r--  jstests/sharding/coll_epoch_test1.js | 76
-rw-r--r--  jstests/sharding/coll_epoch_test2.js | 84
-rw-r--r--  jstests/sharding/count1.js | 62
-rw-r--r--  jstests/sharding/count2.js | 22
-rw-r--r--  jstests/sharding/count_slaveok.js | 34
-rw-r--r--  jstests/sharding/csrs_upgrade_during_migrate.js | 2
-rw-r--r--  jstests/sharding/cursor1.js | 20
-rw-r--r--  jstests/sharding/cursor_cleanup.js | 2
-rw-r--r--  jstests/sharding/drop_configdb.js | 8
-rw-r--r--  jstests/sharding/enable_sharding_basic.js | 2
-rw-r--r--  jstests/sharding/explain_read_pref.js | 2
-rw-r--r--  jstests/sharding/features1.js | 16
-rw-r--r--  jstests/sharding/features2.js | 22
-rw-r--r--  jstests/sharding/findandmodify1.js | 2
-rw-r--r--  jstests/sharding/forget_mr_temp_ns.js | 2
-rw-r--r--  jstests/sharding/geo_near_random1.js | 4
-rw-r--r--  jstests/sharding/geo_near_random2.js | 4
-rw-r--r--  jstests/sharding/geo_shardedgeonear.js | 8
-rw-r--r--  jstests/sharding/group_slaveok.js | 6
-rw-r--r--  jstests/sharding/hash_basic.js | 2
-rw-r--r--  jstests/sharding/hash_shard1.js | 6
-rw-r--r--  jstests/sharding/hash_shard_unique_compound.js | 6
-rw-r--r--  jstests/sharding/index1.js | 122
-rw-r--r--  jstests/sharding/inserts_consistent.js | 70
-rw-r--r--  jstests/sharding/jumbo1.js | 18
-rw-r--r--  jstests/sharding/key_many.js | 2
-rw-r--r--  jstests/sharding/key_string.js | 18
-rw-r--r--  jstests/sharding/large_chunk.js | 6
-rw-r--r--  jstests/sharding/limit_push.js | 6
-rw-r--r--  jstests/sharding/listDatabases.js | 20
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 4
-rw-r--r--  jstests/sharding/major_version_check.js | 38
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js | 8
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js | 8
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js | 8
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js | 8
-rw-r--r--  jstests/sharding/map_reduce_validation.js | 6
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js | 8
-rw-r--r--  jstests/sharding/merge_chunks_basic.js | 2
-rw-r--r--  jstests/sharding/migrateBig.js | 34
-rw-r--r--  jstests/sharding/migrateBig_balancer.js | 20
-rw-r--r--  jstests/sharding/migration_sets_fromMigrate_flag.js | 2
-rw-r--r--  jstests/sharding/migration_with_source_ops.js | 2
-rw-r--r--  jstests/sharding/mongos_no_detect_sharding.js | 32
-rw-r--r--  jstests/sharding/mongos_validate_backoff.js | 2
-rw-r--r--  jstests/sharding/mongos_validate_writes.js | 80
-rw-r--r--  jstests/sharding/movePrimary1.js | 2
-rw-r--r--  jstests/sharding/move_chunk_basic.js | 2
-rw-r--r--  jstests/sharding/move_primary_basic.js | 2
-rw-r--r--  jstests/sharding/movechunk_include.js | 8
-rw-r--r--  jstests/sharding/movechunk_with_default_paranoia.js | 8
-rw-r--r--  jstests/sharding/movechunk_with_moveParanoia.js | 8
-rw-r--r--  jstests/sharding/movechunk_with_noMoveParanoia.js | 8
-rw-r--r--  jstests/sharding/moveprimary_ignore_sharded.js | 30
-rw-r--r--  jstests/sharding/mrShardedOutputAuth.js | 2
-rw-r--r--  jstests/sharding/mr_and_agg_versioning.js | 6
-rw-r--r--  jstests/sharding/mr_shard_version.js | 60
-rw-r--r--  jstests/sharding/multi_coll_drop.js | 32
-rw-r--r--  jstests/sharding/multi_mongos2.js | 16
-rw-r--r--  jstests/sharding/multi_mongos2a.js | 6
-rw-r--r--  jstests/sharding/names.js | 24
-rw-r--r--  jstests/sharding/no_empty_reset.js | 50
-rw-r--r--  jstests/sharding/parallel.js | 24
-rw-r--r--  jstests/sharding/pending_chunk.js | 2
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 8
-rw-r--r--  jstests/sharding/query_config.js | 24
-rw-r--r--  jstests/sharding/recovering_slaveok.js | 6
-rw-r--r--  jstests/sharding/remove2.js | 20
-rw-r--r--  jstests/sharding/replset_config/config_rs_no_primary.js | 4
-rw-r--r--  jstests/sharding/return_partial_shards_down.js | 20
-rw-r--r--  jstests/sharding/rs_stepdown_and_pooling.js | 2
-rw-r--r--  jstests/sharding/shard1.js | 8
-rw-r--r--  jstests/sharding/shard2.js | 28
-rw-r--r--  jstests/sharding/shard3.js | 74
-rw-r--r--  jstests/sharding/shard5.js | 4
-rw-r--r--  jstests/sharding/shard6.js | 36
-rw-r--r--  jstests/sharding/shard_collection_basic.js | 2
-rw-r--r--  jstests/sharding/shard_existing.js | 2
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 2
-rw-r--r--  jstests/sharding/shard_keycount.js | 22
-rw-r--r--  jstests/sharding/shard_targeting.js | 4
-rw-r--r--  jstests/sharding/sharding_balance1.js | 8
-rw-r--r--  jstests/sharding/sharding_balance3.js | 6
-rw-r--r--  jstests/sharding/sharding_balance4.js | 42
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js | 40
-rw-r--r--  jstests/sharding/sharding_multiple_ns_rs.js | 10
-rw-r--r--  jstests/sharding/sharding_rs1.js | 12
-rw-r--r--  jstests/sharding/sharding_rs2.js | 50
-rw-r--r--  jstests/sharding/sharding_system_namespaces.js | 2
-rw-r--r--  jstests/sharding/sort1.js | 56
-rw-r--r--  jstests/sharding/stale_version_write.js | 32
-rw-r--r--  jstests/sharding/startup_with_all_configs_down.js | 2
-rw-r--r--  jstests/sharding/stats.js | 30
-rw-r--r--  jstests/sharding/sync_cluster_config/auth_config_down.js | 24
-rw-r--r--  jstests/sharding/sync_cluster_config/dbhash_cache.js | 8
-rw-r--r--  jstests/sharding/sync_cluster_config/empty_cluster_init.js | 2
-rw-r--r--  jstests/sharding/sync_cluster_config/sync2.js | 2
-rw-r--r--  jstests/sharding/tag_auto_split.js | 16
-rw-r--r--  jstests/sharding/test_stacked_migration_cleanup.js | 12
-rw-r--r--  jstests/sharding/unowned_doc_filtering.js | 2
-rw-r--r--  jstests/sharding/update_immutable_fields.js | 16
-rw-r--r--  jstests/sharding/update_sharded.js | 16
-rw-r--r--  jstests/sharding/user_flags_sharded.js | 6
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 16
128 files changed, 1138 insertions, 1138 deletions
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index f59ec8d139f..c637f10c6b4 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -6,16 +6,16 @@ st.adminCommand({ shardcollection: "test.offerChange", key: { "categoryId": 1, "
var db = st.s.getDB('test');
var offerChange = db.getCollection('offerChange');
-var testDoc = { "_id": 123, "categoryId": 9881, "store": "NEW" }
+var testDoc = { "_id": 123, "categoryId": 9881, "store": "NEW" };
offerChange.remove({}, false);
-offerChange.insert(testDoc)
+offerChange.insert(testDoc);
assert.writeError(offerChange.update({ _id: 123 }, { $set: { store: "NEWEST" } }, true, false));
var doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
-offerChange.insert(testDoc)
+offerChange.insert(testDoc);
assert.writeError(offerChange.update({ _id: 123 },
{ _id: 123, categoryId: 9881, store: "NEWEST" },
true, false));
@@ -23,7 +23,7 @@ doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
-offerChange.insert(testDoc)
+offerChange.insert(testDoc);
assert.writeError(offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" }));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
diff --git a/jstests/sharding/all_config_hosts_down.js b/jstests/sharding/all_config_hosts_down.js
index 0ca261bcf08..5827480dca4 100644
--- a/jstests/sharding/all_config_hosts_down.js
+++ b/jstests/sharding/all_config_hosts_down.js
@@ -5,10 +5,10 @@
(function() {
"use strict";
-var st = new ShardingTest({ shards : 1, mongos : 1 })
+var st = new ShardingTest({ shards : 1, mongos : 1 });
-var mongos = st.s
-var coll = mongos.getCollection( "foo.bar" )
+var mongos = st.s;
+var coll = mongos.getCollection( "foo.bar" );
jsTestLog( "Stopping config servers" );
for (var i = 0; i < st._configServers.length; i++) {
@@ -18,17 +18,17 @@ for (var i = 0; i < st._configServers.length; i++) {
// Make sure mongos has no database info currently loaded
mongos.getDB( "admin" ).runCommand({ flushRouterConfig : 1 });
-jsTestLog( "Config flushed and config servers down!" )
+jsTestLog( "Config flushed and config servers down!" );
// Throws transport error first and subsequent times when loading config data, not no primary
for( var i = 0; i < 2; i++ ){
try {
- coll.findOne()
+ coll.findOne();
// Should always throw
- assert( false )
+ assert( false );
}
catch( e ) {
- printjson( e )
+ printjson( e );
// Make sure we get a transport error, and not a no-primary error
assert(e.code == 8002 || // SCCC config down, for v3.0 compatibility.
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index 0c8d7e3a3dc..c5d63fcae59 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -1,18 +1,18 @@
// Ensure you can't shard on an array key
-var st = new ShardingTest({ name : jsTestName(), shards : 3 })
+var st = new ShardingTest({ name : jsTestName(), shards : 3 });
-var mongos = st.s0
+var mongos = st.s0;
-var coll = mongos.getCollection( jsTestName() + ".foo" )
+var coll = mongos.getCollection( jsTestName() + ".foo" );
-st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
-printjson( mongos.getDB("config").chunks.find().toArray() )
+printjson( mongos.getDB("config").chunks.find().toArray() );
-st.printShardingStatus()
+st.printShardingStatus();
-print( "1: insert some invalid data" )
+print( "1: insert some invalid data" );
var value = null;
@@ -26,37 +26,37 @@ assert.writeError(coll.insert({ _id : [ 1, 2 ] , i : 3}));
assert.writeOK(coll.insert({ i : 1 }));
// Update the value with valid other field
-value = coll.findOne({ i : 1 })
+value = coll.findOne({ i : 1 });
assert.writeOK(coll.update( value, { $set : { j : 2 } } ));
// Update the value with invalid other fields
-value = coll.findOne({ i : 1 })
+value = coll.findOne({ i : 1 });
assert.writeError(coll.update( value, Object.merge( value, { i : [ 3 ] } ) ));
// Multi-update the value with invalid other fields
-value = coll.findOne({ i : 1 })
+value = coll.findOne({ i : 1 });
assert.writeError(coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true));
// Multi-update the value with other fields (won't work, but no error)
-value = coll.findOne({ i : 1 })
+value = coll.findOne({ i : 1 });
assert.writeOK(coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true));
// Query the value with other fields (won't work, but no error)
-value = coll.findOne({ i : 1 })
-coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray()
+value = coll.findOne({ i : 1 });
+coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray();
// Can't remove using multikey, but shouldn't error
-value = coll.findOne({ i : 1 })
-coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) )
+value = coll.findOne({ i : 1 });
+coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) );
// Can't remove using multikey, but shouldn't error
-value = coll.findOne({ i : 1 })
+value = coll.findOne({ i : 1 });
assert.writeOK(coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) ));
-assert.eq( coll.find().itcount(), 1 )
+assert.eq( coll.find().itcount(), 1 );
-value = coll.findOne({ i : 1 })
+value = coll.findOne({ i : 1 });
assert.writeOK(coll.remove( Object.extend( value, { i : 1 } ) ));
-assert.eq( coll.find().itcount(), 0 )
+assert.eq( coll.find().itcount(), 0 );
coll.ensureIndex({ _id : 1, i : 1, j: 1 });
// Can insert document that will make index into a multi-key as long as it's not part of shard key.
@@ -75,40 +75,40 @@ coll.remove({});
assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true));
assert.eq( coll.find().itcount(), 1 );
-printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" )
+printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" );
// Insert a bunch of data then shard over key which is an array
-var coll = mongos.getCollection( "" + coll + "2" )
+var coll = mongos.getCollection( "" + coll + "2" );
for( var i = 0; i < 10; i++ ){
// TODO : does not check weird cases like [ i, i ]
assert.writeOK(coll.insert({ i : [ i, i + 1 ] }));
}
-coll.ensureIndex({ _id : 1, i : 1 })
+coll.ensureIndex({ _id : 1, i : 1 });
try {
- st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+ st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
}
catch( e ){
- print( "Correctly threw error on sharding with multikey index." )
+ print( "Correctly threw error on sharding with multikey index." );
}
-st.printShardingStatus()
+st.printShardingStatus();
// Insert a bunch of data then shard over key which is not an array
-var coll = mongos.getCollection( "" + coll + "3" )
+var coll = mongos.getCollection( "" + coll + "3" );
for( var i = 0; i < 10; i++ ){
// TODO : does not check weird cases like [ i, i ]
assert.writeOK(coll.insert({ i : i }));
}
-coll.ensureIndex({ _id : 1, i : 1 })
+coll.ensureIndex({ _id : 1, i : 1 });
-st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } )
+st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
-st.printShardingStatus()
+st.printShardingStatus();
// Finish
-st.stop()
+st.stop();
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 8c643b1be64..8d45d4b2de3 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -127,11 +127,11 @@ assert.eq(result.ok, 1, tojson(result));
s.getDB("admin").runCommand({enableSharding : "test"});
s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
-d1.waitForState( d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 )
+d1.waitForState( d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
s.getDB(testUser.db).createUser({user: testUser.username,
pwd: testUser.password,
- roles: jsTest.basicUserRoles})
+ roles: jsTest.basicUserRoles});
s.getDB(testUserReadOnly.db).createUser({user: testUserReadOnly.username,
pwd: testUserReadOnly.password,
roles: jsTest.readOnlyUserRoles});
@@ -170,12 +170,12 @@ shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1",
print("adding shard "+shardName);
login(adminUser);
print("logged in");
-result = s.getDB("admin").runCommand({addShard : shardName})
+result = s.getDB("admin").runCommand({addShard : shardName});
ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true });
ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
-s.getDB("test").foo.remove({})
+s.getDB("test").foo.remove({});
var num = 10000;
var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
@@ -201,7 +201,7 @@ assert.soon(function() {
//SERVER-3645
//assert.eq(s.getDB("test").foo.count(), num+1);
-var numDocs = s.getDB("test").foo.find().itcount()
+var numDocs = s.getDB("test").foo.find().itcount();
if (numDocs != num) {
// Missing documents. At this point we're already in a failure mode, the code in this statement
// is to get a better idea how/why it's failing.
@@ -219,7 +219,7 @@ if (numDocs != num) {
lastDocNumber = docs[i].x;
numDocsSeen++;
}
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()")
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
assert.eq(num - numDocs, missingDocNumbers.length);
load('jstests/libs/trace_missing_docs.js');
@@ -292,11 +292,11 @@ print("result: " + x);
// Test read only users
print( "starting read only tests" );
-var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host )
+var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host );
var readOnlyDB = readOnlyS.getDB( "test" );
print( " testing find that should fail" );
-assert.throws( function(){ readOnlyDB.foo.findOne(); } )
+assert.throws( function(){ readOnlyDB.foo.findOne(); } );
print( " logging in" );
login( testUserReadOnly , readOnlyS );
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index 25e7a0144c2..e58657e8dba 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -4,7 +4,7 @@ var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize
var mongos = st.s;
var adminDB = mongos.getDB('admin');
-var db = mongos.getDB('test')
+var db = mongos.getDB('test');
adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index f502f90567a..258ec539c68 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -69,10 +69,10 @@ assert.writeOK(bulk.execute({ w: "majority"}));
assert.eq(expectedDocs, testDB.foo.count());
// Wait for the balancer to start back up
-st.startBalancer()
+st.startBalancer();
// Make sure we've done at least some splitting, so the balancer will work
-assert.gt( configDB.chunks.find({ ns : 'test.foo' }).count(), 2 )
+assert.gt( configDB.chunks.find({ ns : 'test.foo' }).count(), 2 );
// Make sure we eventually balance all the chunks we've created
assert.soon( function() {
@@ -82,11 +82,11 @@ assert.soon( function() {
}, "no balance happened", 5 * 60 * 1000 );
assert.soon( function(){
- print( "Waiting for migration cleanup to occur..." )
+ print( "Waiting for migration cleanup to occur..." );
return testDB.foo.find().itcount() == testDB.foo.count();
-})
+});
-var map = function() { emit (this.i, this.j) };
+var map = function() { emit (this.i, this.j); };
var reduce = function( key, values ) {
var jCount = 0;
values.forEach( function(j) { jCount += j; } );
@@ -97,19 +97,19 @@ var checkCommandSucceeded = function( db, cmdObj ) {
print( "Running command that should succeed: " );
printjson( cmdObj );
resultObj = db.runCommand( cmdObj );
- printjson( resultObj )
+ printjson( resultObj );
assert ( resultObj.ok );
return resultObj;
-}
+};
var checkCommandFailed = function( db, cmdObj ) {
print( "Running command that should fail: " );
printjson( cmdObj );
resultObj = db.runCommand( cmdObj );
- printjson( resultObj )
+ printjson( resultObj );
assert ( !resultObj.ok );
return resultObj;
-}
+};
var checkReadOps = function( hasReadAuth ) {
if ( hasReadAuth ) {
@@ -144,7 +144,7 @@ var checkReadOps = function( hasReadAuth ) {
pipeline: [ {$project : {j : 1}},
{$group : {_id : 'j', sum : {$sum : '$j'}}}]} );
}
-}
+};
var checkWriteOps = function( hasWriteAuth ) {
if ( hasWriteAuth ) {
@@ -197,7 +197,7 @@ var checkWriteOps = function( hasWriteAuth ) {
}
assert( !passed );
}
-}
+};
var checkAdminOps = function( hasAuth ) {
if ( hasAuth ) {
@@ -225,7 +225,7 @@ var checkAdminOps = function( hasAuth ) {
to : st.rs1.name, _waitForDelete : true} );
}
-}
+};
var checkRemoveShard = function( hasWriteAuth ) {
if ( hasWriteAuth ) {
@@ -235,12 +235,12 @@ var checkRemoveShard = function( hasWriteAuth ) {
checkRemoveShard = function() {
res = checkCommandSucceeded( adminDB, { removeshard : st.rs1.name } );
return res.msg == 'removeshard completed successfully';
- }
+ };
assert.soon( checkRemoveShard , "failed to remove shard" );
} else {
checkCommandFailed( adminDB, { removeshard : st.rs1.name } );
}
-}
+};
var checkAddShard = function( hasWriteAuth ) {
if ( hasWriteAuth ) {
@@ -248,7 +248,7 @@ var checkAddShard = function( hasWriteAuth ) {
} else {
checkCommandFailed( adminDB, { addshard : st.rs1.getURL() } );
}
-}
+};
st.stopBalancer();
@@ -285,7 +285,7 @@ assert( testDB.dropDatabase().ok );
checkRemoveShard( true );
adminDB.printShardingStatus();
-jsTestLog("Check adding a shard")
+jsTestLog("Check adding a shard");
assert( adminDB.logout().ok );
checkAddShard( false );
assert( adminDB.auth( rwUser, password ) );
@@ -294,6 +294,6 @@ adminDB.printShardingStatus();
st.stop();
-}
+};
doTest();
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 2456cfdbb2c..4356180107d 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -5,7 +5,7 @@ var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize
var mongos = st.s;
var adminDB = mongos.getDB('admin');
-var db = mongos.getDB('test')
+var db = mongos.getDB('test');
adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index ef2e5dfa760..592a2443119 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -88,7 +88,7 @@ assert.soon(function() {
printjson(result);
return result.ok && result.state == "completed";
-}, "failed to drain shard completely", 5 * 60 * 1000)
+}, "failed to drain shard completely", 5 * 60 * 1000);
assert.eq(1, st.config.shards.count() , "removed server still appears in count");
diff --git a/jstests/sharding/auth_copydb.js b/jstests/sharding/auth_copydb.js
index c971065b35e..6ecb45ac201 100644
--- a/jstests/sharding/auth_copydb.js
+++ b/jstests/sharding/auth_copydb.js
@@ -40,6 +40,6 @@ assert.eq(1, destTestDB.foo.findOne().a);
st.stop();
-}
+};
runTest();
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 803450b7ec0..599aed242b5 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -45,7 +45,7 @@ var nodeCount = replTest.nodes.length;
* connections to access the server from localhost connections if there
* is no admin user.
*/
-var adminDB = mongos.getDB( 'admin' )
+var adminDB = mongos.getDB( 'admin' );
adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
adminDB.auth( 'user', 'password' );
var priAdminDB = replTest.getPrimary().getDB( 'admin' );
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index a94a013e4dc..c827b4948b3 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -14,7 +14,7 @@ var adminUser = {
"dbAdminAnyDatabase",
"userAdminAnyDatabase",
"clusterAdmin" ]
-}
+};
var test1User = {
user: "test",
@@ -45,7 +45,7 @@ var cluster = new ShardingTest({ name: "authmr",
var test2DB = adminDB.getSiblingDB('test2');
var ex;
try {
- adminDB.createUser(adminUser)
+ adminDB.createUser(adminUser);
assert(adminDB.auth(adminUser.user, adminUser.pwd));
adminDB.dropUser(test1User.user);
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 37dbbeca5bb..3d3d0d8a605 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -14,7 +14,7 @@ var adminUser = {
"dbAdminAnyDatabase",
"userAdminAnyDatabase",
"clusterAdmin" ]
-}
+};
var test1Reader = {
user: "test",
@@ -45,7 +45,7 @@ var cluster = new ShardingTest({ name: "authwhere",
var test2DB = adminDB.getSiblingDB('test2');
var ex;
try {
- adminDB.createUser(adminUser)
+ adminDB.createUser(adminUser);
assert(adminDB.auth(adminUser.user, adminUser.pwd));
adminDB.dropUser(test1Reader.user);
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 644453c176f..6e04080fe80 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -13,7 +13,7 @@ bigString = "";
while ( bigString.length < 1024 * 50 )
bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-db = s.getDB( "test" )
+db = s.getDB( "test" );
coll = db.foo;
var i=0;
@@ -26,7 +26,7 @@ assert.writeOK( bulk.execute() );
primary = s.getServer( "test" ).getDB( "test" );
-counts = []
+counts = [];
s.printChunks();
counts.push( s.config.chunks.count() );
@@ -40,8 +40,8 @@ for ( ; i<200; i++ ){
}
assert.writeOK( bulk.execute() );
-s.printChunks()
-s.printChangeLog()
+s.printChunks();
+s.printChangeLog();
counts.push( s.config.chunks.count() );
bulk = coll.initializeUnorderedBulkOp();
@@ -51,7 +51,7 @@ for ( ; i<400; i++ ){
assert.writeOK( bulk.execute() );
s.printChunks();
-s.printChangeLog()
+s.printChangeLog();
counts.push( s.config.chunks.count() );
bulk = coll.initializeUnorderedBulkOp();
@@ -61,18 +61,18 @@ for ( ; i<700; i++ ){
assert.writeOK( bulk.execute() );
s.printChunks();
-s.printChangeLog()
+s.printChangeLog();
counts.push( s.config.chunks.count() );
-assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) )
-sorted = counts.slice(0)
+assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) );
+sorted = counts.slice(0);
// Sort doesn't sort numbers correctly by default, resulting in fail
-sorted.sort( function(a, b){ return a - b } )
-assert.eq( counts , sorted , "counts 2 : " + tojson( counts ) )
+sorted.sort( function(a, b){ return a - b; } );
+assert.eq( counts , sorted , "counts 2 : " + tojson( counts ) );
-print( counts )
+print( counts );
-printjson( db.stats() )
+printjson( db.stats() );
s.stop();
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index 6115e7cda4a..afca165f2a3 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -52,11 +52,11 @@ assert.soon( function(){
doCountsGlobal();
print("Counts: " + counta + countb);
- return counta > 0 && countb > 0
+ return counta > 0 && countb > 0;
});
-print("checkpoint B" )
+print("checkpoint B" );
var missing = [];
@@ -82,15 +82,15 @@ for ( i=0; i<j*100; i++ ){
s.printChangeLog();
-print("missing: " + tojson( missing ) )
+print("missing: " + tojson( missing ) );
assert.soon( function(z){ return doCountsGlobal() == j * 100; } , "from each a:" + counta + " b:" + countb + " i:" + i );
-print("checkpoint B.a" )
+print("checkpoint B.a" );
s.printChunks();
assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
assert.eq( j * 100 , counta + countb , "from each 2 a:" + counta + " b:" + countb + " i:" + i );
assert( missing.length == 0 , "missing : " + tojson( missing ) );
-print("checkpoint C" )
+print("checkpoint C" );
assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
@@ -101,7 +101,7 @@ for ( i=0; i<100; i++ ){
gc();
}
-print("checkpoint D")
+print("checkpoint D");
// test not-sharded cursors
db = s.getDB("test2" );
@@ -120,9 +120,9 @@ for ( i=0; i<100; i++ ){
assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
// Stop the balancer, otherwise it may grab some connections from the pool for itself
-s.stopBalancer()
+s.stopBalancer();
-print("checkpoint E")
+print("checkpoint E");
assert( t.findOne() , "check close 0" );
@@ -140,7 +140,7 @@ print("checkpoint F");
assert.throws(function() {
s.getDB("test" ).foo.find().sort({ s : 1 }).forEach(function(x) {
printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
- })
+ });
});
print("checkpoint G");
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 16ae418eb0e..372d60d86be 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -11,7 +11,7 @@ for (var i = 0; i < 2100; i++) {
}
assert.writeOK(bulk.execute());
-s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'test-rs0');
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index f6fc6bfc063..cdd723f8a44 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -5,50 +5,50 @@ var st = new ShardingTest({ name: "test",
shards: 10,
mongos: 3 });
-var mongosA = st.s0
-var mongosB = st.s1
-var mongosC = st.s2
+var mongosA = st.s0;
+var mongosB = st.s1;
+var mongosC = st.s2;
-var admin = mongosA.getDB("admin")
-var config = mongosA.getDB("config")
+var admin = mongosA.getDB("admin");
+var config = mongosA.getDB("config");
-var collA = mongosA.getCollection( "foo.bar" )
-var collB = mongosB.getCollection( "" + collA )
-var collC = mongosB.getCollection( "" + collA )
+var collA = mongosA.getCollection( "foo.bar" );
+var collB = mongosB.getCollection( "" + collA );
+var collC = mongosB.getCollection( "" + collA );
admin.runCommand({ enableSharding : "" + collA.getDB() });
st.ensurePrimaryShard(collA.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : "" + collA, key : { _id : 1 } })
+admin.runCommand({ shardCollection : "" + collA, key : { _id : 1 } });
-var shards = config.shards.find().sort({ _id : 1 }).toArray()
+var shards = config.shards.find().sort({ _id : 1 }).toArray();
-jsTestLog( "Splitting up the collection..." )
+jsTestLog( "Splitting up the collection..." );
// Split up the collection
for( var i = 0; i < shards.length; i++ ){
- printjson( admin.runCommand({ split : "" + collA, middle : { _id : i } }) )
- printjson( admin.runCommand({ moveChunk : "" + collA, find : { _id : i }, to : shards[i]._id }) )
+ printjson( admin.runCommand({ split : "" + collA, middle : { _id : i } }) );
+ printjson( admin.runCommand({ moveChunk : "" + collA, find : { _id : i }, to : shards[i]._id }) );
}
-mongosB.getDB("admin").runCommand({ flushRouterConfig : 1 })
-mongosC.getDB("admin").runCommand({ flushRouterConfig : 1 })
-printjson( collB.count() )
-printjson( collC.count() )
+mongosB.getDB("admin").runCommand({ flushRouterConfig : 1 });
+mongosC.getDB("admin").runCommand({ flushRouterConfig : 1 });
+printjson( collB.count() );
+printjson( collC.count() );
// Change up all the versions...
for( var i = 0; i < shards.length; i++ ){
- printjson( admin.runCommand({ moveChunk : "" + collA, find : { _id : i }, to : shards[ (i + 1) % shards.length ]._id }) )
+ printjson( admin.runCommand({ moveChunk : "" + collA, find : { _id : i }, to : shards[ (i + 1) % shards.length ]._id }) );
}
// Make sure mongos A is up-to-date
-mongosA.getDB("admin").runCommand({ flushRouterConfig : 1 })
+mongosA.getDB("admin").runCommand({ flushRouterConfig : 1 });
-config.printShardingStatus( true )
+config.printShardingStatus( true );
-jsTestLog( "Running count!" )
+jsTestLog( "Running count!" );
-printjson( collB.count() )
-printjson( collC.find().toArray() )
+printjson( collB.count() );
+printjson( collC.find().toArray() );
st.stop();
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index 1863141d020..306c2a82020 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -5,10 +5,10 @@
var st = new ShardingTest({ shards : 2, mongos : 2 });
var mongos = st.s;
-var staleMongos = st.s1
+var staleMongos = st.s1;
var config = mongos.getDB("config");
var admin = mongos.getDB("admin");
-var shards = config.shards.find().toArray()
+var shards = config.shards.find().toArray();
for (var i = 0; i < shards.length; i++) {
shards[i].conn = new Mongo(shards[i].host);
@@ -24,7 +24,7 @@ assert.writeError(admin.TestColl.insert([ { Doc1: 1 }, { Doc2: 1 } ]));
jsTest.log("Setting up collections...");
-assert.commandWorked(admin.runCommand({ enableSharding : collSh.getDB() + "" }))
+assert.commandWorked(admin.runCommand({ enableSharding : collSh.getDB() + "" }));
st.ensurePrimaryShard(collSh.getDB() + "", shards[0]._id);
assert.commandWorked(admin.runCommand({ movePrimary : collUn.getDB() + "",
@@ -60,11 +60,11 @@ st.printShardingStatus();
// BREAK-ON-ERROR
//
-jsTest.log("Bulk insert (no ContinueOnError) to single shard...")
+jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
resetColls();
var inserts = [{ukey : 0},
- {ukey : 1}]
+ {ukey : 1}];
assert.writeOK(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
@@ -75,22 +75,22 @@ assert.eq(2, collUn.find().itcount());
assert.writeOK(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongos error...")
+jsTest.log("Bulk insert (no COE) with mongos error...");
resetColls();
var inserts = [{ukey : 0},
{hello : "world"},
- {ukey : 1}]
+ {ukey : 1}];
assert.writeError(collSh.insert(inserts));
assert.eq(1, collSh.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongod error...")
+jsTest.log("Bulk insert (no COE) with mongod error...");
resetColls();
var inserts = [{ukey : 0},
{ukey : 0},
- {ukey : 1}]
+ {ukey : 1}];
assert.writeError(collSh.insert(inserts));
assert.eq(1, collSh.find().itcount());
@@ -101,13 +101,13 @@ assert.eq(1, collUn.find().itcount());
assert.writeError(collDi.insert(inserts));
assert.eq(1, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongod and mongos error...")
+jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
resetColls();
var inserts = [{ukey : 0},
{ukey : 0},
{ukey : 1},
- {hello : "world"}]
+ {hello : "world"}];
var res = assert.writeError(collSh.insert(inserts));
assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
@@ -121,11 +121,11 @@ res = assert.writeError(collDi.insert(inserts));
assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
assert.eq(1, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) on second shard...")
+jsTest.log("Bulk insert (no COE) on second shard...");
resetColls();
var inserts = [{ukey : 0},
- {ukey : -1}]
+ {ukey : -1}];
assert.writeOK(collSh.insert(inserts));
assert.eq(2, collSh.find().itcount());
@@ -136,25 +136,25 @@ assert.eq(2, collUn.find().itcount());
assert.writeOK(collDi.insert(inserts));
assert.eq(2, collDi.find().itcount());
-jsTest.log("Bulk insert to second shard (no COE) with mongos error...")
+jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
resetColls();
var inserts = [{ukey : 0},
{ukey : 1}, // switches shards
{ukey : -1},
- {hello : "world"}]
+ {hello : "world"}];
assert.writeError(collSh.insert(inserts));
assert.eq(3, collSh.find().itcount());
-jsTest.log("Bulk insert to second shard (no COE) with mongod error...")
+jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
resetColls();
var inserts = [{ukey : 0},
{ukey : 1},
{ukey : -1},
{ukey : -2},
- {ukey : -2}]
+ {ukey : -2}];
assert.writeError(collSh.insert(inserts));
assert.eq(4, collSh.find().itcount());
@@ -165,7 +165,7 @@ assert.eq(4, collUn.find().itcount());
assert.writeError(collDi.insert(inserts));
assert.eq(4, collDi.find().itcount());
-jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...")
+jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
resetColls();
var inserts = [{ukey : 0},
@@ -174,7 +174,7 @@ var inserts = [{ukey : 0},
{ukey : -3},
{ukey : 4},
{ukey : 4},
- {hello : "world"}]
+ {hello : "world"}];
res = assert.writeError(collSh.insert(inserts));
assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
@@ -192,22 +192,22 @@ assert.eq(5, collDi.find().itcount());
// CONTINUE-ON-ERROR
//
-jsTest.log("Bulk insert (yes COE) with mongos error...")
+jsTest.log("Bulk insert (yes COE) with mongos error...");
resetColls();
var inserts = [{ukey : 0},
{hello : "world"},
- {ukey : 1}]
+ {ukey : 1}];
assert.writeError(collSh.insert(inserts, 1)); // COE
assert.eq(2, collSh.find().itcount());
-jsTest.log("Bulk insert (yes COE) with mongod error...")
+jsTest.log("Bulk insert (yes COE) with mongod error...");
resetColls();
var inserts = [{ukey : 0},
{ukey : 0},
- {ukey : 1}]
+ {ukey : 1}];
assert.writeError(collSh.insert(inserts, 1));
assert.eq(2, collSh.find().itcount());
@@ -219,7 +219,7 @@ assert.writeError(collDi.insert(inserts, 1));
assert.eq(2, collDi.find().itcount());
jsTest
- .log("Bulk insert to third shard (yes COE) with mongod and mongos error...")
+ .log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
resetColls();
var inserts = [{ukey : 0},
@@ -228,7 +228,7 @@ var inserts = [{ukey : 0},
{ukey : -3},
{ukey : 4},
{ukey : 4},
- {hello : "world"}]
+ {hello : "world"}];
// Last error here is mongos error
res = assert.writeError(collSh.insert(inserts, 1));
@@ -245,7 +245,7 @@ assert.eq(6, res.nInserted, res.toString());
assert.eq(6, collDi.find().itcount());
jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error "
- + "(mongos error first)...")
+ + "(mongos error first)...");
resetColls();
var inserts = [{ukey : 0},
@@ -299,7 +299,7 @@ assert.writeOK(staleCollSh.insert(inserts));
// together with WBL.
//
-jsTest.log("Testing bulk insert (no COE) with WBL and large objects...")
+jsTest.log("Testing bulk insert (no COE) with WBL and large objects...");
resetColls();
var data10MB = 'x'.repeat(10 * 1024 * 1024);
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index a349d770a2f..7d42d8b41a2 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -2,42 +2,42 @@
(function() {
// Setup randomized test
-var seed = new Date().getTime()
+var seed = new Date().getTime();
// seed = 0
-Random.srand( seed )
-print( "Seeded with " + seed )
+Random.srand( seed );
+print( "Seeded with " + seed );
-var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 })
+var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 });
// Setup sharded collection
-var mongos = st.s0
-var db = mongos.getDB( jsTestName() )
-var coll = db.coll
-st.shardColl( coll, { _id : 1 }, false )
+var mongos = st.s0;
+var db = mongos.getDB( jsTestName() );
+var coll = db.coll;
+st.shardColl( coll, { _id : 1 }, false );
// Insert lots of bulk documents
-var numDocs = 1000000
+var numDocs = 1000000;
-var bulkSize = Math.floor( Random.rand() * 1000 ) + 2
-bulkSize = 4000
-var docSize = 128 /* bytes */
-print( "\n\n\nBulk size is " + bulkSize )
+var bulkSize = Math.floor( Random.rand() * 1000 ) + 2;
+bulkSize = 4000;
+var docSize = 128; /* bytes */
+print( "\n\n\nBulk size is " + bulkSize );
-var data = "x"
+var data = "x";
while( Object.bsonsize({ x : data }) < docSize ){
- data += data
+ data += data;
}
-print( "\n\n\nDocument size is " + Object.bsonsize({ x : data }) )
+print( "\n\n\nDocument size is " + Object.bsonsize({ x : data }) );
var docsInserted = 0;
var balancerOn = false;
while (docsInserted < numDocs) {
- var currBulkSize = ( numDocs - docsInserted > bulkSize ) ? bulkSize : ( numDocs - docsInserted )
+ var currBulkSize = ( numDocs - docsInserted > bulkSize ) ? bulkSize : ( numDocs - docsInserted );
- var bulk = []
+ var bulk = [];
for( var i = 0; i < currBulkSize; i++ ){
bulk.push({hi: "there", at: docsInserted, i: i, x: data});
}
@@ -45,33 +45,33 @@ while (docsInserted < numDocs) {
assert.writeOK(coll.insert( bulk ));
if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){
- print( "Inserted " + (docsInserted + currBulkSize) + " documents." )
- st.printShardingStatus()
+ print( "Inserted " + (docsInserted + currBulkSize) + " documents." );
+ st.printShardingStatus();
}
- docsInserted += currBulkSize
+ docsInserted += currBulkSize;
if( docsInserted > numDocs / 2 && ! balancerOn ){
- print( "Turning on balancer after half documents inserted." )
+ print( "Turning on balancer after half documents inserted." );
st.startBalancer();
balancerOn = true;
}
}
// Check we inserted all the documents
-st.printShardingStatus()
+st.printShardingStatus();
-var count = coll.find().count()
-var itcount = count //coll.find().itcount()
+var count = coll.find().count();
+var itcount = count; //coll.find().itcount()
print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
st.startBalancer();
-var count = coll.find().count()
-var itcount = coll.find().itcount()
+var count = coll.find().count();
+var itcount = coll.find().itcount();
-print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcount )
+print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcount );
// SERVER-3645
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index 3b6e3fb21e5..e1364f4ad12 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -56,4 +56,4 @@ assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
st.stop();
-})()
+})();
diff --git a/jstests/sharding/cleanup_orphaned_basic.js b/jstests/sharding/cleanup_orphaned_basic.js
index d3943784b59..387863ee75d 100644
--- a/jstests/sharding/cleanup_orphaned_basic.js
+++ b/jstests/sharding/cleanup_orphaned_basic.js
@@ -141,4 +141,4 @@ assert.commandFailed(shardAdmin.runCommand({
st.stop();
-})()
+})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
index 157bd3a7d9b..a4356524b09 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
@@ -159,4 +159,4 @@ assert.eq(31, coll.count());
st.stop();
-})()
+})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index f6c375891dd..fdc6d6c8dff 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -126,4 +126,4 @@ assert.eq(1, coll.count());
st.stop();
-})()
+})();
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index f87ff093704..00294087885 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -69,4 +69,4 @@ jsTest.log( "DONE!" );
st.stop();
-})()
+})();
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index 35d01ff786d..d294a7e0998 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -1,20 +1,20 @@
// Tests whether a split and a migrate in a sharded cluster preserve the epoch
-var st = new ShardingTest( { shards : 2, mongos : 1 } )
+var st = new ShardingTest( { shards : 2, mongos : 1 } );
// Balancer is by default stopped, thus it will not interfere
-var config = st.s.getDB( "config" )
-var admin = st.s.getDB( "admin" )
-var coll = st.s.getCollection( "foo.bar" )
+var config = st.s.getDB( "config" );
+var admin = st.s.getDB( "admin" );
+var coll = st.s.getCollection( "foo.bar" );
// First enable sharding
-admin.runCommand({ enableSharding : coll.getDB() + "" })
+admin.runCommand({ enableSharding : coll.getDB() + "" });
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
-var primary = config.databases.find({ _id : coll.getDB() + "" }).primary
-var notPrimary = null
-config.shards.find().forEach( function( doc ){ if( doc._id != primary ) notPrimary = doc._id } )
+var primary = config.databases.find({ _id : coll.getDB() + "" }).primary;
+var notPrimary = null;
+config.shards.find().forEach( function( doc ){ if( doc._id != primary ) notPrimary = doc._id; } );
var createdEpoch = null;
var checkEpochs = function(){
@@ -23,25 +23,25 @@ var checkEpochs = function(){
// Make sure the epochs exist, are non-zero, and are consistent
assert( chunk.lastmodEpoch );
print( chunk.lastmodEpoch + "" );
- assert.neq( chunk.lastmodEpoch + "", "000000000000000000000000" )
- if( createdEpoch == null ) createdEpoch = chunk.lastmodEpoch
- else assert.eq( createdEpoch, chunk.lastmodEpoch )
+ assert.neq( chunk.lastmodEpoch + "", "000000000000000000000000" );
+ if( createdEpoch == null ) createdEpoch = chunk.lastmodEpoch;
+ else assert.eq( createdEpoch, chunk.lastmodEpoch );
- })
-}
+ });
+};
checkEpochs();
// Now do a split
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) )
+printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
// Check all the chunks for epochs
checkEpochs();
// Now do a migrate
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : notPrimary }) )
+printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : notPrimary }) );
// Check all the chunks for epochs
checkEpochs();
-st.stop()
+st.stop();
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index 472f232ba7f..fa24a035da7 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -1,78 +1,78 @@
// Tests various cases of dropping and recreating collections in the same namespace with multiple mongoses
-var st = new ShardingTest({ shards : 3, mongos : 3, verbose : 1 })
+var st = new ShardingTest({ shards : 3, mongos : 3, verbose : 1 });
// Balancer is by default stopped, thus it will not interfere
// Use separate mongoses for admin, inserting data, and validating results, so no
// single-mongos tricks will work
-var insertMongos = st.s2
-var staleMongos = st.s1
+var insertMongos = st.s2;
+var staleMongos = st.s1;
-var config = st.s.getDB( "config" )
-var admin = st.s.getDB( "admin" )
-var coll = st.s.getCollection( "foo.bar" )
+var config = st.s.getDB( "config" );
+var admin = st.s.getDB( "admin" );
+var coll = st.s.getCollection( "foo.bar" );
-insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true })
+insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true });
-var shards = {}
+var shards = {};
config.shards.find().forEach( function( doc ){
- shards[ doc._id ] = new Mongo( doc.host )
-})
+ shards[ doc._id ] = new Mongo( doc.host );
+});
//
// Test that inserts and queries go to the correct shard even when the collection has been sharded
// in the background
//
-jsTest.log( "Enabling sharding for the first time..." )
+jsTest.log( "Enabling sharding for the first time..." );
-admin.runCommand({ enableSharding : coll.getDB() + "" })
+admin.runCommand({ enableSharding : coll.getDB() + "" });
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
var bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ ) {
bulk.insert({ _id : i, test : "a" });
}
assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "a" }).itcount() )
+assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "a" }).itcount() );
-coll.drop()
+coll.drop();
//
// Test that inserts and queries go to the correct shard even when the collection has been
// re-sharded in the background
//
-jsTest.log( "Re-enabling sharding with a different key..." )
+jsTest.log( "Re-enabling sharding with a different key..." );
-admin.runCommand({ enableSharding : coll.getDB() + "" })
+admin.runCommand({ enableSharding : coll.getDB() + "" });
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-coll.ensureIndex({ notId : 1 })
-admin.runCommand({ shardCollection : coll + "", key : { notId : 1 } })
+coll.ensureIndex({ notId : 1 });
+admin.runCommand({ shardCollection : coll + "", key : { notId : 1 } });
bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ ) {
bulk.insert({ notId : i, test : "b" });
}
assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "b" }).itcount() )
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a" ] } }).itcount() )
+assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "b" }).itcount() );
+assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a" ] } }).itcount() );
-coll.drop()
+coll.drop();
//
// Test that inserts and queries go to the correct shard even when the collection has been
// unsharded and moved to a different primary
//
-jsTest.log( "Re-creating unsharded collection from a sharded collection on different primary..." )
+jsTest.log( "Re-creating unsharded collection from a sharded collection on different primary..." );
var getOtherShard = function( shard ){
for( id in shards ){
- if( id != shard ) return id
+ if( id != shard ) return id;
}
-}
+};
var otherShard = getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary);
assert.commandWorked(admin.runCommand({movePrimary: coll.getDB() + "", to: otherShard}));
@@ -81,40 +81,40 @@ if (st.configRS) {
// the most recent config data.
st.configRS.awaitLastOpCommitted();
}
-jsTest.log( "moved primary..." )
+jsTest.log( "moved primary..." );
bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ )
bulk.insert({ test : "c" });
assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "c" }).itcount() )
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b" ] } }).itcount() )
+assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "c" }).itcount() );
+assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b" ] } }).itcount() );
-coll.drop()
+coll.drop();
//
// Test that inserts and queries go to correct shard even when the collection has been unsharded,
// resharded, and moved to a different primary
//
-jsTest.log( "Re-creating sharded collection with different primary..." )
+jsTest.log( "Re-creating sharded collection with different primary..." );
-admin.runCommand({ enableSharding : coll.getDB() + "" })
+admin.runCommand({ enableSharding : coll.getDB() + "" });
admin.runCommand({ movePrimary : coll.getDB() + "",
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) })
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+ to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) });
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ )
bulk.insert({ test : "d" });
assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "d" }).itcount() )
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b", "c" ] } }).itcount() )
+assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "d" }).itcount() );
+assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b", "c" ] } }).itcount() );
-coll.drop()
+coll.drop();
-jsTest.log( "Done!" )
+jsTest.log( "Done!" );
-st.stop()
+st.stop();
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index c76cf51266f..09109ebce43 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -4,61 +4,61 @@
// operation on a mongos may be active when it happens. All operations should handle gracefully.
//
-var st = new ShardingTest({ shards : 2, mongos : 5, verbose : 1 })
+var st = new ShardingTest({ shards : 2, mongos : 5, verbose : 1 });
// Balancer is by default stopped, thus it will not interfere
// Use separate mongos for reading, updating, inserting, removing data
-var readMongos = st.s1
-var updateMongos = st.s2
-var insertMongos = st.s3
-var removeMongos = st.s4
+var readMongos = st.s1;
+var updateMongos = st.s2;
+var insertMongos = st.s3;
+var removeMongos = st.s4;
-var config = st.s.getDB( "config" )
-var admin = st.s.getDB( "admin" )
-var coll = st.s.getCollection( "foo.bar" )
+var config = st.s.getDB( "config" );
+var admin = st.s.getDB( "admin" );
+var coll = st.s.getCollection( "foo.bar" );
-insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true })
+insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true });
-var shards = {}
+var shards = {};
config.shards.find().forEach( function( doc ){
- shards[ doc._id ] = new Mongo( doc.host )
-})
+ shards[ doc._id ] = new Mongo( doc.host );
+});
//
// Set up a sharded collection
//
-jsTest.log( "Enabling sharding for the first time..." )
+jsTest.log( "Enabling sharding for the first time..." );
-admin.runCommand({ enableSharding : coll.getDB() + "" })
+admin.runCommand({ enableSharding : coll.getDB() + "" });
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
assert.writeOK(coll.insert({ hello : "world" }));
-jsTest.log( "Sharding collection across multiple shards..." )
+jsTest.log( "Sharding collection across multiple shards..." );
var getOtherShard = function( shard ){
for( id in shards ){
- if( id != shard ) return id
+ if( id != shard ) return id;
}
-}
+};
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) )
+printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 },
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) )
+ to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) );
-st.printShardingStatus()
+st.printShardingStatus();
//
// Force all mongoses to load the current status of the cluster
//
-jsTest.log( "Loading this status in all mongoses..." )
+jsTest.log( "Loading this status in all mongoses..." );
for( var i = 0; i < st._mongos.length; i++ ){
- printjson( st._mongos[i].getDB( "admin" ).runCommand({ flushRouterConfig : 1 }) )
- assert.neq( null, st._mongos[i].getCollection( coll + "" ).findOne() )
+ printjson( st._mongos[i].getDB( "admin" ).runCommand({ flushRouterConfig : 1 }) );
+ assert.neq( null, st._mongos[i].getCollection( coll + "" ).findOne() );
}
//
@@ -66,9 +66,9 @@ for( var i = 0; i < st._mongos.length; i++ ){
// versions are the same, but the split is at a different point.
//
-jsTest.log( "Rebuilding sharded collection with different split..." )
+jsTest.log( "Rebuilding sharded collection with different split..." );
-coll.drop()
+coll.drop();
var droppedCollDoc = config.collections.findOne({ _id: coll.getFullName() });
assert(droppedCollDoc != null);
@@ -77,46 +77,46 @@ assert(droppedCollDoc.lastmodEpoch != null);
assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000")),
"epoch not zero: " + droppedCollDoc.lastmodEpoch);
-admin.runCommand({ enableSharding : coll.getDB() + "" })
+admin.runCommand({ enableSharding : coll.getDB() + "" });
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
assert.writeOK(bulk.execute());
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 200 } }) )
+printjson( admin.runCommand({ split : coll + "", middle : { _id : 200 } }) );
printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 200 },
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) )
+ to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) );
//
// Make sure all operations on mongoses aren't tricked by the change
//
-jsTest.log( "Checking other mongoses for detection of change..." )
+jsTest.log( "Checking other mongoses for detection of change..." );
-jsTest.log( "Checking find..." )
+jsTest.log( "Checking find..." );
// Ensure that finding an element works when resharding
-assert.neq( null, readMongos.getCollection( coll + "" ).findOne({ _id : 1 }) )
+assert.neq( null, readMongos.getCollection( coll + "" ).findOne({ _id : 1 }) );
-jsTest.log( "Checking update...")
+jsTest.log( "Checking update...");
// Ensure that updating an element finds the right location
assert.writeOK(updateMongos.getCollection( coll + "" ).update({ _id : 1 },
{ $set : { updated : true } }));
-assert.neq( null, coll.findOne({ updated : true }) )
+assert.neq( null, coll.findOne({ updated : true }) );
-jsTest.log( "Checking insert..." )
+jsTest.log( "Checking insert..." );
// Ensure that inserting an element finds the right shard
assert.writeOK(insertMongos.getCollection( coll + "" ).insert({ _id : 101 }));
-assert.neq( null, coll.findOne({ _id : 101 }) )
+assert.neq( null, coll.findOne({ _id : 101 }) );
-jsTest.log( "Checking remove..." )
+jsTest.log( "Checking remove..." );
// Ensure that removing an element finds the right shard, verified by the mongos doing the sharding
assert.writeOK(removeMongos.getCollection( coll + "" ).remove({ _id : 2 }));
-assert.eq( null, coll.findOne({ _id : 2 }) )
+assert.eq( null, coll.findOne({ _id : 2 }) );
-coll.drop()
+coll.drop();
-jsTest.log( "Done!" )
+jsTest.log( "Done!" );
-st.stop()
+st.stop();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index 486bf40080b..3d9f3f874be 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -6,9 +6,9 @@ var db = s.getDB( "test" );
// ************** Test Set #1 *************
// Basic counts on "bar" collections, not yet sharded
-db.bar.save( { n : 1 } )
-db.bar.save( { n : 2 } )
-db.bar.save( { n : 3 } )
+db.bar.save( { n : 1 } );
+db.bar.save( { n : 2 } );
+db.bar.save( { n : 3 } );
assert.eq( 3 , db.bar.find().count() , "bar 1" );
assert.eq( 1 , db.bar.find( { n : 1 } ).count() , "bar 2" );
@@ -28,7 +28,7 @@ assert.eq( 1 , db.bar.find( { n : 1 } ).count() , "bar 2" );
// 7. test invalid queries/values
// part 1
-s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
@@ -37,12 +37,12 @@ secondary = s.getOther( primary ).getDB( "test" );
assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
-db.foo.save( { _id : 1 , name : "eliot" } )
-db.foo.save( { _id : 2 , name : "sara" } )
-db.foo.save( { _id : 3 , name : "bob" } )
-db.foo.save( { _id : 4 , name : "joe" } )
-db.foo.save( { _id : 5 , name : "mark" } )
-db.foo.save( { _id : 6 , name : "allan" } )
+db.foo.save( { _id : 1 , name : "eliot" } );
+db.foo.save( { _id : 2 , name : "sara" } );
+db.foo.save( { _id : 3 , name : "bob" } );
+db.foo.save( { _id : 4 , name : "joe" } );
+db.foo.save( { _id : 5 , name : "mark" } );
+db.foo.save( { _id : 6 , name : "allan" } );
assert.eq( 6 , db.foo.find().count() , "basic count" );
@@ -53,7 +53,7 @@ s.adminCommand({ split: "test.foo", middle: { name: "eliot" }});
// MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
-s.printChunks()
+s.printChunks();
// part 3
assert.eq( 6 , db.foo.find().count() , "basic count after split " );
@@ -106,15 +106,15 @@ assert.eq( 0 , db.foo.find().limit(100).skip(10).count(true) );
assert.eq( 0 , db.foo.find().limit(-100).skip(10).count(true) );
assert.eq( 0 , db.foo.find().limit(0).skip(10).count(true) );
-assert.eq( 2 , db.foo.find().limit(2).itcount() , "LS1" )
-assert.eq( 2 , db.foo.find().skip(2).limit(2).itcount() , "LS2" )
-assert.eq( 1 , db.foo.find().skip(5).limit(2).itcount() , "LS3" )
-assert.eq( 6 , db.foo.find().limit(2).count() , "LSC1" )
-assert.eq( 2 , db.foo.find().limit(2).size() , "LSC2" )
-assert.eq( 2 , db.foo.find().skip(2).limit(2).size() , "LSC3" )
-assert.eq( 1 , db.foo.find().skip(5).limit(2).size() , "LSC4" )
-assert.eq( 4 , db.foo.find().skip(1).limit(4).size() , "LSC5" )
-assert.eq( 5 , db.foo.find().skip(1).limit(6).size() , "LSC6" )
+assert.eq( 2 , db.foo.find().limit(2).itcount() , "LS1" );
+assert.eq( 2 , db.foo.find().skip(2).limit(2).itcount() , "LS2" );
+assert.eq( 1 , db.foo.find().skip(5).limit(2).itcount() , "LS3" );
+assert.eq( 6 , db.foo.find().limit(2).count() , "LSC1" );
+assert.eq( 2 , db.foo.find().limit(2).size() , "LSC2" );
+assert.eq( 2 , db.foo.find().skip(2).limit(2).size() , "LSC3" );
+assert.eq( 1 , db.foo.find().skip(5).limit(2).size() , "LSC4" );
+assert.eq( 4 , db.foo.find().skip(1).limit(4).size() , "LSC5" );
+assert.eq( 5 , db.foo.find().skip(1).limit(6).size() , "LSC6" );
// SERVER-3567 older negative limit tests
assert.eq( 2 , db.foo.find().limit(2).itcount() , "N1" );
@@ -136,23 +136,23 @@ function nameString( c ){
assert.eq( "allan,bob,eliot,joe,mark,sara" , nameString( db.foo.find().sort( { name : 1 } ) ) , "sort 1" );
assert.eq( "sara,mark,joe,eliot,bob,allan" , nameString( db.foo.find().sort( { name : -1 } ) ) , "sort 2" );
-assert.eq( "allan,bob" , nameString( db.foo.find().sort( { name : 1 } ).limit(2) ) , "LSD1" )
-assert.eq( "bob,eliot" , nameString( db.foo.find().sort( { name : 1 } ).skip(1).limit(2) ) , "LSD2" )
-assert.eq( "joe,mark" , nameString( db.foo.find().sort( { name : 1 } ).skip(3).limit(2) ) , "LSD3" )
+assert.eq( "allan,bob" , nameString( db.foo.find().sort( { name : 1 } ).limit(2) ) , "LSD1" );
+assert.eq( "bob,eliot" , nameString( db.foo.find().sort( { name : 1 } ).skip(1).limit(2) ) , "LSD2" );
+assert.eq( "joe,mark" , nameString( db.foo.find().sort( { name : 1 } ).skip(3).limit(2) ) , "LSD3" );
-assert.eq( "eliot,sara" , nameString( db.foo.find().sort( { _id : 1 } ).limit(2) ) , "LSE1" )
-assert.eq( "sara,bob" , nameString( db.foo.find().sort( { _id : 1 } ).skip(1).limit(2) ) , "LSE2" )
-assert.eq( "joe,mark" , nameString( db.foo.find().sort( { _id : 1 } ).skip(3).limit(2) ) , "LSE3" )
+assert.eq( "eliot,sara" , nameString( db.foo.find().sort( { _id : 1 } ).limit(2) ) , "LSE1" );
+assert.eq( "sara,bob" , nameString( db.foo.find().sort( { _id : 1 } ).skip(1).limit(2) ) , "LSE2" );
+assert.eq( "joe,mark" , nameString( db.foo.find().sort( { _id : 1 } ).skip(3).limit(2) ) , "LSE3" );
// part 6
for ( i=0; i<10; i++ ){
- db.foo.save( { _id : 7 + i , name : "zzz" + i } )
+ db.foo.save( { _id : 7 + i , name : "zzz" + i } );
}
-assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).itcount() , "LSF1" )
-assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).itcount() , "LSF2" )
-assert.eq( 5 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).itcount() , "LSF3" )
-assert.eq( 3 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).limit(3).itcount() , "LSF4" )
+assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).itcount() , "LSF1" );
+assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).itcount() , "LSF2" );
+assert.eq( 5 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).itcount() , "LSF3" );
+assert.eq( 3 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).limit(3).itcount() , "LSF4" );
// part 7
// Make sure count command returns error for invalid queries
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index 7361359791d..387bee9f8e3 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -14,29 +14,29 @@ var db2 = s2.getDB( "test" ).foo;
assert.eq( 1, s1.config.chunks.count(), "sanity check A");
-db1.save( { name : "aaa" } )
-db1.save( { name : "bbb" } )
-db1.save( { name : "ccc" } )
-db1.save( { name : "ddd" } )
-db1.save( { name : "eee" } )
-db1.save( { name : "fff" } )
+db1.save( { name : "aaa" } );
+db1.save( { name : "bbb" } );
+db1.save( { name : "ccc" } );
+db1.save( { name : "ddd" } );
+db1.save( { name : "eee" } );
+db1.save( { name : "fff" } );
s1.adminCommand( { split : "test.foo" , middle : { name : "ddd" } } );
assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos1" );
assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos2" );
-s1.printChunks( "test.foo" )
+s1.printChunks( "test.foo" );
s1.adminCommand( { movechunk : "test.foo" , find : { name : "aaa" } , to : s1.getOther( s1.getServer( "test" ) ).name, _waitForDelete : true });
assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "post count mongos1" );
// The second mongos still thinks its shard mapping is valid and accepts a count
-print( "before sleep: " + Date() )
-sleep( 2000 )
-print( "after sleep: " + Date() )
-s1.printChunks( "test.foo" )
+print( "before sleep: " + Date() );
+sleep( 2000 );
+print( "after sleep: " + Date() );
+s1.printChunks( "test.foo" );
assert.eq( 3, db2.find( { name : { $gte: "aaa" , $lt: "ddd" } } ).count() , "post count mongos2" );
db2.findOne();
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index 410e2e4a4f3..eaf39a18352 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -24,49 +24,49 @@ for( var i = 0; i < 300; i++ ){
}
assert.writeOK(bulk.execute());
-var connA = conn
-var connB = new Mongo( st.s.host )
-var connC = new Mongo( st.s.host )
+var connA = conn;
+var connB = new Mongo( st.s.host );
+var connC = new Mongo( st.s.host );
-st.printShardingStatus()
+st.printShardingStatus();
// Wait for client to update itself and replication to finish
-rst.awaitReplication()
+rst.awaitReplication();
-var primary = rst.getPrimary()
-var sec = rst.getSecondary()
+var primary = rst.getPrimary();
+var sec = rst.getSecondary();
// Data now inserted... stop the master, since only two in set, other will still be secondary
rst.stop(rst.getPrimary());
-printjson( rst.status() )
+printjson( rst.status() );
// Wait for the mongos to recognize the slave
-ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } )
+ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } );
// Make sure that mongos realizes that primary is already down
ReplSetTest.awaitRSClientHosts( conn, primary, { ok : false });
// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
// master is down
-conn.setSlaveOk()
+conn.setSlaveOk();
// count using the command path
-assert.eq( 30, coll.find({ i : 0 }).count() )
+assert.eq( 30, coll.find({ i : 0 }).count() );
// count using the query path
assert.eq( 30, coll.find({ i : 0 }).itcount() );
-assert.eq( 10, coll.distinct("i").length )
+assert.eq( 10, coll.distinct("i").length );
try {
- conn.setSlaveOk( false )
+ conn.setSlaveOk( false );
// Should throw exception, since not slaveOk'd
- coll.find({ i : 0 }).count()
+ coll.find({ i : 0 }).count();
- print( "Should not reach here!" )
- assert( false )
+ print( "Should not reach here!" );
+ assert( false );
}
catch( e ){
- print( "Non-slaveOk'd connection failed." )
+ print( "Non-slaveOk'd connection failed." );
}
st.stop();
diff --git a/jstests/sharding/csrs_upgrade_during_migrate.js b/jstests/sharding/csrs_upgrade_during_migrate.js
index c1fac8eeff6..f094f617188 100644
--- a/jstests/sharding/csrs_upgrade_during_migrate.js
+++ b/jstests/sharding/csrs_upgrade_during_migrate.js
@@ -30,7 +30,7 @@ var st;
conf.members[i].slaveDelay = delayed ? 30 : 0;
}
reconfig(rst, conf);
- }
+ };
var coordinator = new CSRSUpgradeCoordinator();
coordinator.setupSCCCCluster();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 7c83b79d742..336899a40c1 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -3,12 +3,12 @@
(function() {
var s = new ShardingTest({ name: "sharding_cursor1", shards: 2 });
-s.config.settings.find().forEach( printjson )
+s.config.settings.find().forEach( printjson );
// create a sharded 'test.foo', for the moment with just one chunk
s.adminCommand( { enablesharding: "test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection: "test.foo", key: { _id: 1 } } )
+s.adminCommand( { shardcollection: "test.foo", key: { _id: 1 } } );
db = s.getDB( "test" );
primary = s.getServer( "test" ).getDB( "test" );
@@ -44,22 +44,22 @@ assert.eq( numObjs , cursor3.itcount() , "c3" );
// test timeout
gc(); gc();
-cur = db.foo.find().batchSize( 2 )
-assert( cur.next() , "T1" )
+cur = db.foo.find().batchSize( 2 );
+assert( cur.next() , "T1" );
assert( cur.next() , "T2" );
assert.commandWorked(s.admin.runCommand({
setParameter: 1,
cursorTimeoutMillis: 10000 // 10 seconds.
}));
before = db.serverStatus().metrics.cursor;
-printjson( before )
-sleep( 6000 )
-assert( cur.next() , "T3" )
+printjson( before );
+sleep( 6000 );
+assert( cur.next() , "T3" );
assert( cur.next() , "T4" );
-sleep( 24000 )
-assert.throws( function(){ cur.next(); } , null , "T5" )
+sleep( 24000 );
+assert.throws( function(){ cur.next(); } , null , "T5" );
after = db.serverStatus().metrics.cursor;
-gc(); gc()
+gc(); gc();
s.stop();
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index e0971db5e32..4eb200b87e3 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -52,7 +52,7 @@ jsTest.log("End the cursors.");
shardedCursor.itcount();
unshardedCursor.itcount();
-var cursorInfo = admin.serverStatus().metrics.cursor;;
+var cursorInfo = admin.serverStatus().metrics.cursor;
printjson(cursorInfo);
assert.eq(cursorInfo.open.multiTarget, 0);
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index 56351716527..8ac87648dfa 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -9,7 +9,7 @@ var getConfigsvrToWriteTo = function(st) {
} else {
return st._configServers[0];
}
-}
+};
var st = new ShardingTest({ shards : 2 });
var mongos = st.s;
@@ -17,15 +17,15 @@ var config = getConfigsvrToWriteTo(st).getDB('config');
// Try to drop config db via configsvr
-print ( "1: Try to drop config database via configsvr" )
+print ( "1: Try to drop config database via configsvr" );
assert.eq(0, config.dropDatabase().ok);
assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
config.dropDatabase().errmsg);
// Try to drop config db via mongos
-var config = mongos.getDB( "config" )
+var config = mongos.getDB( "config" );
-print ( "1: Try to drop config database via mongos" )
+print ( "1: Try to drop config database via mongos" );
assert.eq(0, config.dropDatabase().ok);
// 20 = ErrorCodes::IllegalOperation
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index 741434b4d9e..eb58df59d57 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -50,6 +50,6 @@ assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partition
st.stop();
-})()
+})();
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index 2f673e19498..eab0a190ad4 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -26,7 +26,7 @@ var assertCorrectTargeting = function(explain, isMongos, secExpected) {
else {
assert(isMaster.ismaster);
}
-}
+};
var testAllModes = function(conn, isMongos) {
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 86dedddba72..0e52c514ee8 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -82,10 +82,10 @@ assert.eq( 1 , db.foo3.count() , "eval pre1" );
assert.eq( 1 , db.foo2.count() , "eval pre2" );
assert.eq( 8 , db.eval( function(){ return db.foo3.findOne().a; } ), "eval 1 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ) } , null , "eval 2" )
+assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ); } , null , "eval 2" );
assert.eq( 1 , db.eval( function(){ return db.foo3.count(); } ), "eval 3 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ) } , null , "eval 4" )
+assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ); } , null , "eval 4" );
// ----- "eval" new command name SERVER-5588 -----
var result;
@@ -116,11 +116,11 @@ assert.eq( 2 , b.foo4.getIndexes().length , "ub2" );
assert( a.foo4.getIndexes()[1].unique , "ua3" );
assert( b.foo4.getIndexes()[1].unique , "ub3" );
-assert.eq( 2 , db.foo4.count() , "uc1" )
-db.foo4.save( { num : 7 } )
-assert.eq( 3 , db.foo4.count() , "uc2" )
+assert.eq( 2 , db.foo4.count() , "uc1" );
+db.foo4.save( { num : 7 } );
+assert.eq( 3 , db.foo4.count() , "uc2" );
assert.writeError(db.foo4.save( { num : 7 } ));
-assert.eq( 3 , db.foo4.count() , "uc4" )
+assert.eq( 3 , db.foo4.count() , "uc4" );
// --- don't let you convertToCapped ----
assert( ! db.foo4.isCapped() , "ca1" );
@@ -173,7 +173,7 @@ assert.soon( function() {
return cmdRes.ok;
}, 'move chunk test.foo6', 60000, 1000 );
-assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count : 0 } , reduce : function(z,prev){ prev.count++; } } ); } );;
+assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count : 0 } , reduce : function(z,prev){ prev.count++; } } ); } );
// ---- can't shard non-empty collection without index -----
@@ -191,7 +191,7 @@ assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 }
// --- listDatabases ---
-r = db.getMongo().getDBs()
+r = db.getMongo().getDBs();
assert.eq(2, r.databases.length, tojson(r));
assert.eq( "number", typeof(r.totalSize) , "listDatabases 2 : " + tojson( r ) );
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index bf31e4448b2..b9dc4d6309c 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -63,9 +63,9 @@ assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
printjson( db.foo2.getIndexes() );
s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } );
-assert.eq( 3 , db.foo2.count() , "F2" )
+assert.eq( 3 , db.foo2.count() , "F2" );
db.foo2.insert( {} );
-assert.eq( 4 , db.foo2.count() , "F3" )
+assert.eq( 4 , db.foo2.count() , "F3" );
// --- map/reduce
@@ -126,7 +126,7 @@ doMR = function( n ){
assert.eq( 3 , z.b , "MR T9 " + n );
assert.eq( 3 , z.c , "MR TA " + n );
-}
+};
doMR( "before" );
@@ -147,20 +147,20 @@ cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " , out : "broken1"
x = db.runCommand( cmd );
y = s._connections[0].getDB( "test" ).runCommand( cmd );
-printjson( x )
-printjson( y )
+printjson( x );
+printjson( y );
// count
-db.countaa.save({"regex" : /foo/i})
-db.countaa.save({"regex" : /foo/i})
-db.countaa.save({"regex" : /foo/i})
+db.countaa.save({"regex" : /foo/i});
+db.countaa.save({"regex" : /foo/i});
+db.countaa.save({"regex" : /foo/i});
assert.eq( 3 , db.countaa.count() , "counta1" );
assert.eq( 3 , db.countaa.find().itcount() , "counta1" );
x = null; y = null;
try {
- x = db.runCommand( "forceerror" )
+ x = db.runCommand( "forceerror" );
}
catch ( e ){
x = e;
@@ -180,8 +180,8 @@ catch ( e ){
// as then MongoS should set code 121 as well.
//
// assert.eq( x.code , y.code , "assert format" )
-assert.eq( x.errmsg , y.errmsg , "assert format" )
-assert.eq( x.ok , y.ok , "assert format" )
+assert.eq( x.errmsg , y.errmsg , "assert format" );
+assert.eq( x.ok , y.ok , "assert format" );
// isMaster and query-wrapped-command
isMaster = db.runCommand({isMaster:1});
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index a144eceed72..3a93610f57e 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -11,7 +11,7 @@ secondary = s.getOther( primary ).getDB( "test" );
numObjs = 20;
// Turn balancer off - with small numbers of chunks the balancer tries to correct all imbalances, not just < 8
-s.s.getDB( "config" ).settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true )
+s.s.getDB( "config" ).settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true );
s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index 8962536abba..d972aa7dbe1 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -36,7 +36,7 @@ var checkForSeenNS = function( threadStats, regex ) {
assert( !( regex.test( seenNSes ) ) );
}
}
-}
+};
checkForSeenNS( mongodThreadStats, /^foo.tmp/ );
checkForSeenNS( mongosThreadStats, /^foo.tmp/ );
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index ea34d428a4d..bff63ca2b18 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -32,9 +32,9 @@ for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
// Turn balancer back on, for actual tests
// s.startBalancer() // SERVER-13365
-printShardingSizes()
+printShardingSizes();
-var opts = {sharded: true}
+var opts = {sharded: true};
test.testPt([0,0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index 92652292272..45d40975d53 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -31,7 +31,7 @@ for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
//Turn balancer back on, for actual tests
// s.startBalancer(); // SERVER-13365
-printShardingSizes()
+printShardingSizes();
opts = {sphere:0, nToTest:test.nPts*0.01, sharded:true};
test.testPt([0,0], opts);
@@ -40,7 +40,7 @@ test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
-opts.sphere = 1
+opts.sphere = 1;
test.testPt([0,0], opts);
test.testPt(test.mkPt(0.8), opts);
test.testPt(test.mkPt(0.8), opts);
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index 4c117bf8cef..d00c6a057a3 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -1,15 +1,15 @@
// SERVER-7906
-var coll = 'points'
+var coll = 'points';
function test(db, sharded, indexType) {
printjson(db);
db[coll].drop();
if (sharded) {
- var shards = []
+ var shards = [];
var config = shardedDB.getSiblingDB("config");
- config.shards.find().forEach(function(shard) { shards.push(shard._id) });
+ config.shards.find().forEach(function(shard) { shards.push(shard._id); });
shardedDB.adminCommand({shardCollection: shardedDB[coll].getFullName(), key: {rand:1}});
for (var i=1; i < 10; i++) {
@@ -33,7 +33,7 @@ function test(db, sharded, indexType) {
assert.commandWorked(db[coll].ensureIndex({ loc: indexType }));
- var queryPoint = [0,0]
+ var queryPoint = [0,0];
geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
assert.commandWorked(db.runCommand(geoCmd), tojson({sharded: sharded, indexType: indexType}));
}
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 3412e7da163..0c7175f8fed 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -42,13 +42,13 @@ conn.setSlaveOk();
// Should not throw exception, since slaveOk'd
assert.eq(10, coll.group({ key: { i: true } ,
- reduce: function(obj, ctx) { ctx.count += 1 },
- initial: { count: 0 } }).length)
+ reduce: function(obj, ctx) { ctx.count += 1; },
+ initial: { count: 0 } }).length);
try {
conn.setSlaveOk(false);
var res = coll.group({ key: { i: true },
- reduce: function(obj, ctx) { ctx.count += 1 },
+ reduce: function(obj, ctx) { ctx.count += 1; },
initial: { count: 0 } });
print("Should not reach here! Group result: " + tojson(res));
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index fc560b1bef2..d7599488695 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -2,7 +2,7 @@ var st = new ShardingTest({ shards: 2, chunkSize: 1 });
var testDB = st.s.getDB('test');
testDB.adminCommand({ enableSharding: 'test' });
-st.ensurePrimaryShard('test', 'shard0001');;
+st.ensurePrimaryShard('test', 'shard0001');
testDB.adminCommand({ shardCollection: 'test.user', key: { x: 'hashed' }});
var configDB = st.s.getDB('config');
diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js
index 1f03a61bb33..b34179b7d6c 100644
--- a/jstests/sharding/hash_shard1.js
+++ b/jstests/sharding/hash_shard1.js
@@ -23,7 +23,7 @@ db.printShardingStatus();
// insert stuff
var numitems = 1000;
for(i = 0; i < numitems; i++ ){
- t.insert( { a: i } )
+ t.insert( { a: i } );
}
// check they all got inserted
assert.eq( t.find().count() , numitems , "count off after inserts" );
@@ -39,7 +39,7 @@ var res = db.adminCommand( { movechunk : ns ,
find : { a : 0 } ,
bounds : [ chunk.min , chunk.max ] ,
to: "shard0000" } );
-assert.eq( res.ok , 0 , "moveChunk shouldn't work with invalid specification method")
+assert.eq( res.ok , 0 , "moveChunk shouldn't work with invalid specification method");
// now move a chunk using the lower/upper bound method. should work.
var res = db.adminCommand( { movechunk : ns ,
@@ -61,4 +61,4 @@ assert.eq( res.ok , 1 , "movechunk using find query didn't work" );
assert.eq( t.find().itcount() , numitems , "count off after migrate" );
printjson( t.find().explain() );
-s.stop()
+s.stop();
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 832cb93600f..eab3fd92c6c 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -24,11 +24,11 @@ db.printShardingStatus();
// Create unique index
assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
-jsTest.log("------ indexes -------")
+jsTest.log("------ indexes -------");
jsTest.log(tojson(coll.getIndexes()));
// Second Part
-jsTest.log("------ dropping sharded collection to start part 2 -------")
+jsTest.log("------ dropping sharded collection to start part 2 -------");
coll.drop();
//Create unique index
@@ -42,4 +42,4 @@ db.printShardingStatus();
jsTest.log("------ indexes 2-------");
jsTest.log(tojson(coll.getIndexes()));
-s.stop()
+s.stop();
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 57092b3a566..7f0cea49cf0 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -6,8 +6,8 @@ var s = new ShardingTest({ name: "shard_index", shards: 2, mongos: 1 });
// Regenerate fully because of SERVER-2782
for ( var i = 0; i < 22; i++ ) {
- var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i )
- coll.drop()
+ var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i );
+ coll.drop();
var bulk = coll.initializeUnorderedBulkOp();
for ( var j = 0; j < 300; j++ ) {
@@ -20,150 +20,150 @@ for ( var i = 0; i < 22; i++ ) {
s.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
}
- print("\n\n\n\n\nTest # " + i)
+ print("\n\n\n\n\nTest # " + i);
if ( i == 0 ) {
// Unique index exists, but not the right one.
- coll.ensureIndex( { num : 1 }, { unique : true } )
- coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { num : 1 }, { unique : true } );
+ coll.ensureIndex( { x : 1 } );
- passed = false
+ passed = false;
try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
- passed = true
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
+ passed = true;
} catch (e) {
- print( e )
+ print( e );
}
- assert( !passed, "Should not shard collection when another unique index exists!")
+ assert( !passed, "Should not shard collection when another unique index exists!");
}
if ( i == 1 ) {
// Unique index exists as prefix, also index exists
- coll.ensureIndex( { x : 1 } )
- coll.ensureIndex( { x : 1, num : 1 }, { unique : true } )
+ coll.ensureIndex( { x : 1 } );
+ coll.ensureIndex( { x : 1, num : 1 }, { unique : true } );
try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } });
}
catch(e){
- print(e)
- assert( false, "Should be able to shard non-unique index without unique option.")
+ print(e);
+ assert( false, "Should be able to shard non-unique index without unique option.");
}
}
if ( i == 2 ) {
// Non-unique index exists as prefix, also index exists. No unique index.
- coll.ensureIndex( { x : 1 } )
- coll.ensureIndex( { x : 1, num : 1 } )
+ coll.ensureIndex( { x : 1 } );
+ coll.ensureIndex( { x : 1, num : 1 } );
passed = false;
try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } })
+ s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } });
passed = true;
}
catch( e ){
- print(e)
- assert( !passed, "Should be able to shard collection with no unique index if unique not specified.")
+ print(e);
+ assert( !passed, "Should be able to shard collection with no unique index if unique not specified.");
}
}
if ( i == 3 ) {
// Unique index exists as prefix, also unique index exists
- coll.ensureIndex( { num : 1 }, { unique : true })
- coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } )
+ coll.ensureIndex( { num : 1 }, { unique : true });
+ coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } );
try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
}
catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with unique prefix index.")
+ print(e);
+ assert( false, "Should be able to shard collection with unique prefix index.");
}
}
if ( i == 4 ) {
// Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } );
try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true })
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true });
}
catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with unique id index.")
+ print(e);
+ assert( false, "Should be able to shard collection with unique id index.");
}
}
if ( i == 5 ) {
// Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } )
+ coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } );
try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true })
+ s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true });
}
catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with unique combination id index.")
+ print(e);
+ assert( false, "Should be able to shard collection with unique combination id index.");
}
}
if ( i == 6 ) {
- coll.remove({})
+ coll.remove({});
// Unique index does not exist, also unique prefix index exists
- coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } )
+ coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } );
try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
}
catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.")
+ print(e);
+ assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.");
}
- printjson( coll.getIndexes() )
+ printjson( coll.getIndexes() );
// Make sure the index created is unique!
assert.eq( 1, coll.getIndexes().filter( function(z) { return friendlyEqual( z.key, { num : 1 } ) && z.unique; } ).length );
}
if ( i == 7 ) {
- coll.remove({})
+ coll.remove({});
// No index exists
try{
- assert.eq( coll.find().itcount(), 0 )
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } })
+ assert.eq( coll.find().itcount(), 0 );
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } });
}
catch( e ){
- print(e)
- assert( false, "Should be able to shard collection with no index on shard key.")
+ print(e);
+ assert( false, "Should be able to shard collection with no index on shard key.");
}
}
if ( i == 8 ) {
- coll.remove({})
+ coll.remove({});
// No index exists
- passed = false
+ passed = false;
try{
- assert.eq( coll.find().itcount(), 0 )
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true })
- passed = true
+ assert.eq( coll.find().itcount(), 0 );
+ s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
+ passed = true;
}
catch( e ){
- print(e)
+ print(e);
}
- assert( passed, "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.")
+ assert( passed, "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
- printjson( coll.getIndexes() )
+ printjson( coll.getIndexes() );
// Make sure the index created is unique!
assert.eq( 1, coll.getIndexes().filter( function(z) { return friendlyEqual( z.key, { num : 1 } ) && z.unique; } ).length );
@@ -171,17 +171,17 @@ for ( var i = 0; i < 22; i++ ) {
if ( i == 9 ) {
// Unique index exists on a different field as well
- coll.ensureIndex( { num : 1 }, { unique : true } )
- coll.ensureIndex( { x : 1 } )
+ coll.ensureIndex( { num : 1 }, { unique : true } );
+ coll.ensureIndex( { x : 1 } );
- passed = false
+ passed = false;
try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } )
- passed = true
+ s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
+ passed = true;
} catch (e) {
- print( e )
+ print( e );
}
- assert( !passed, "Should not shard collection when another unique index exists!" )
+ assert( !passed, "Should not shard collection when another unique index exists!" );
}
if ( i == 10 ){
@@ -214,7 +214,7 @@ for ( var i = 0; i < 22; i++ ) {
assert.eq( 2, coll.getIndexes().length );
}
if ( i == 11 ){
- coll.remove({})
+ coll.remove({});
//empty collection with useful index. check new index not created
coll.ensureIndex( {num : 1, x : 1} );
@@ -297,7 +297,7 @@ for ( var i = 0; i < 22; i++ ) {
if ( i == 16 ) {
// create hashed index, but try to declare it unique when sharding
- coll.ensureIndex( { num : "hashed"} )
+ coll.ensureIndex( { num : "hashed"} );
passed = false;
try{
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 3c6a562b289..7da92837ad7 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -1,84 +1,84 @@
// Test write re-routing on version mismatch.
-var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 2 })
+var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 2 });
-jsTest.log( "Doing test setup..." )
+jsTest.log( "Doing test setup..." );
// Stop balancer, since it'll just get in the way of this
-st.stopBalancer()
+st.stopBalancer();
-var mongos = st.s
-var admin = mongos.getDB("admin")
-var config = mongos.getDB("config")
-var coll = st.s.getCollection( jsTest.name() + ".coll" )
+var mongos = st.s;
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = st.s.getCollection( jsTest.name() + ".coll" );
-st.shardColl( coll, { _id : 1 }, { _id : 0 }, false )
+st.shardColl( coll, { _id : 1 }, { _id : 0 }, false );
-jsTest.log( "Refreshing second mongos..." )
+jsTest.log( "Refreshing second mongos..." );
-var mongosB = st.s1
-var adminB = mongosB.getDB("admin")
-var collB = mongosB.getCollection( coll + "" )
+var mongosB = st.s1;
+var adminB = mongosB.getDB("admin");
+var collB = mongosB.getCollection( coll + "" );
// Make sure mongosB knows about the coll
-assert.eq( 0, collB.find().itcount() )
+assert.eq( 0, collB.find().itcount() );
// printjson( adminB.runCommand({ flushRouterConfig : 1 }) )
-jsTest.log( "Moving chunk to create stale mongos..." )
+jsTest.log( "Moving chunk to create stale mongos..." );
-var otherShard = config.chunks.findOne({ _id : sh._collRE( coll ) }).shard
+var otherShard = config.chunks.findOne({ _id : sh._collRE( coll ) }).shard;
for( var i = 0; i < st._shardNames.length; i++ ){
if( otherShard != st._shardNames[i] ){
- otherShard = st._shardNames[i]
- break
+ otherShard = st._shardNames[i];
+ break;
}
}
-print( "Other shard : " + otherShard )
+print( "Other shard : " + otherShard );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : otherShard }) )
+printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : otherShard }) );
-jsTest.log( "Inserting docs that needs to be retried..." )
+jsTest.log( "Inserting docs that needs to be retried..." );
-var nextId = -1
+var nextId = -1;
for( var i = 0; i < 2; i++ ){
- printjson( "Inserting " + nextId )
+ printjson( "Inserting " + nextId );
assert.writeOK(collB.insert({ _id : nextId--, hello : "world" }));
}
-jsTest.log( "Inserting doc which successfully goes through..." )
+jsTest.log( "Inserting doc which successfully goes through..." );
// Do second write
-assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }))
+assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
// Assert that write went through
-assert.eq( coll.find().itcount(), 3 )
+assert.eq( coll.find().itcount(), 3 );
-jsTest.log( "Now try moving the actual chunk we're writing to..." )
+jsTest.log( "Now try moving the actual chunk we're writing to..." );
// Now move the actual chunk we're writing to
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : otherShard }) )
+printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : otherShard }) );
-jsTest.log( "Inserting second docs to get written back..." )
+jsTest.log( "Inserting second docs to get written back..." );
// Will fail entirely if there are too many of these, since waiting for the writes to be applied can take too long.
for( var i = 0; i < 2; i++ ){
- collB.insert({ _id : nextId--, hello : "world" })
+ collB.insert({ _id : nextId--, hello : "world" });
}
// Refresh server
-printjson( adminB.runCommand({ flushRouterConfig : 1 }) )
+printjson( adminB.runCommand({ flushRouterConfig : 1 }) );
-jsTest.log( "Inserting second doc which successfully goes through..." )
+jsTest.log( "Inserting second doc which successfully goes through..." );
// Do second write
assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
-jsTest.log( "All docs written this time!" )
+jsTest.log( "All docs written this time!" );
// Assert that writes went through.
-assert.eq( coll.find().itcount(), 6 )
+assert.eq( coll.find().itcount(), 6 );
-jsTest.log( "DONE" )
+jsTest.log( "DONE" );
-st.stop()
+st.stop();
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 02f18530533..4c0b3593022 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -11,9 +11,9 @@ s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
db = s.getDB( "test" );
-big = ""
+big = "";
while ( big.length < 10000 )
- big += "."
+ big += ".";
x = 0;
var bulk = db.foo.initializeUnorderedBulkOp();
@@ -28,26 +28,26 @@ for ( ; x < 2000; x++ )
assert.writeOK( bulk.execute() );
-sh.status(true)
+sh.status(true);
-res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0001" )
+res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0001" );
if ( ! res.ok )
- res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0000" )
+ res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0000" );
-sh.status(true)
+sh.status(true);
-sh.setBalancerState( true )
+sh.setBalancerState( true );
function diff1(){
var x = s.chunkCounts( "foo" );
- printjson( x )
+ printjson( x );
return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
}
assert.soon( function(){
var d = diff1();
print( "diff: " + d );
- sh.status(true)
+ sh.status(true);
return d < 5;
} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index a37c55204c9..4d89c1fcf6e 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -123,7 +123,7 @@ for (var i = 0; i < types.length; i++) {
assert.eq(4, c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount(), curT.name + " $nor itcount()");
var stats = c.stats();
- printjson(stats)
+ printjson(stats);
assert.eq(6, stats.count, curT.name + " total count with stats()");
var count = 0;
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 4c365fdf0d8..6e9f92d09b0 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -2,7 +2,7 @@
var s = new ShardingTest({ name: "keystring", shards: 2 });
-s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
@@ -13,12 +13,12 @@ assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
var db = s.getDB( "test" );
-db.foo.save( { name : "eliot" } )
-db.foo.save( { name : "sara" } )
-db.foo.save( { name : "bob" } )
-db.foo.save( { name : "joe" } )
-db.foo.save( { name : "mark" } )
-db.foo.save( { name : "allan" } )
+db.foo.save( { name : "eliot" } );
+db.foo.save( { name : "sara" } );
+db.foo.save( { name : "bob" } );
+db.foo.save( { name : "joe" } );
+db.foo.save( { name : "mark" } );
+db.foo.save( { name : "allan" } );
assert.eq( 6 , db.foo.find().count() , "basic count" );
@@ -43,8 +43,8 @@ assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 }
// make sure we can't force a split on an extreme key
// [allan->joe)
-assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "allan" } } ) } );
-assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "eliot" } } ) } );
+assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "allan" } } ); } );
+assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "eliot" } } ); } );
s.stop();
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index 613c30a1c1f..06333b3cec3 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -21,7 +21,7 @@ db = s.getDB( "test" );
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
-bigString = ""
+bigString = "";
while ( bigString.length < 10000 )
bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
@@ -42,7 +42,7 @@ primary = s.getServer( "test" ).getDB( "test" );
secondary = s.getOther( primary ).getDB( "test" );
// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk size
-print("Checkpoint 1a")
+print("Checkpoint 1a");
max = 200 * 1024 * 1024;
assert.throws(function() {
s.adminCommand({ movechunk: "test.foo",
@@ -58,7 +58,7 @@ s.adminCommand( { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.g
after = s.config.chunks.find().toArray();
assert.neq( before[0].shard , after[0].shard , "move chunk did not work" );
-s.config.changelog.find().forEach( printjson )
+s.config.changelog.find().forEach( printjson );
s.stop();
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 53acb6583eb..b1583cc21b7 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -17,7 +17,7 @@ s.adminCommand( { shardcollection : "test.limit_push" , key : { x : 1 } } );
// Now split and move the data between the shards
s.adminCommand( { split : "test.limit_push", middle : { x : 50 }} );
-s.adminCommand( { moveChunk: "test.limit_push", find : { x : 51}, to : s.getOther( s.getServer( "test" ) ).name, _waitForDelete : true })
+s.adminCommand( { moveChunk: "test.limit_push", find : { x : 51}, to : s.getOther( s.getServer( "test" ) ).name, _waitForDelete : true });
// Check that the chunks have split correctly
assert.eq( 2 , s.config.chunks.count() , "wrong number of chunks");
@@ -34,7 +34,7 @@ assert.eq( 60 , db.limit_push.find( q ).count() , "Did not find 60 documents" );
// Now make sure that the explain shows that each shard is returning a single document as indicated
// by the "n" element for each shard
exp = db.limit_push.find( q ).sort( { x:-1} ).limit(1).explain("executionStats");
-printjson( exp )
+printjson( exp );
var execStages = exp.executionStats.executionStages;
assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
@@ -43,7 +43,7 @@ var k = 0;
for (var j in execStages.shards) {
assert.eq( 1 , execStages.shards[j].executionStages.nReturned,
"'n' is not 1 from shard000" + k.toString());
- k++
+ k++;
}
s.stop();
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index 2d26bb4e2d3..240081d642d 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,8 +1,8 @@
// tests that listDatabases doesn't show config db on a shard, even if it is there
-var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize:1}})
+var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize:1}});
-var mongos = test.s0
+var mongos = test.s0;
var mongod = test.shard0;
//grab the config db instance by name
@@ -46,20 +46,20 @@ dbInConfigEntryCheck(getDBSection(dbArray, "config"));
dbInConfigEntryCheck(getDBSection(dbArray, 'admin'));
//add doc in config/admin db on the shard
-mongod.getDB("config").foo.insert({_id:1})
-mongod.getDB("admin").foo.insert({_id:1})
+mongod.getDB("config").foo.insert({_id:1});
+mongod.getDB("admin").foo.insert({_id:1});
//add doc in admin db (via mongos)
-mongos.getDB("admin").foo.insert({_id:1})
+mongos.getDB("admin").foo.insert({_id:1});
//verify that the config db is not on a shard
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
//check config db
-assert(getDBSection(dbArray, "config"), "config db not found! 2")
-assert(!getDBSection(dbArray, "config").shards, "config db is on a shard! 2")
+assert(getDBSection(dbArray, "config"), "config db not found! 2");
+assert(!getDBSection(dbArray, "config").shards, "config db is on a shard! 2");
//check admin db
-assert(getDBSection(dbArray, "admin"), "admin db not found! 2")
-assert(!getDBSection(dbArray, "admin").shards, "admin db is on a shard! 2")
+assert(getDBSection(dbArray, "admin"), "admin db not found! 2");
+assert(!getDBSection(dbArray, "admin").shards, "admin db is on a shard! 2");
-test.stop()
+test.stop();
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index c9bb7bfdde6..857b33fad9e 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -36,7 +36,7 @@ var addShard = function(st, shouldPass) {
var findEmptyShard = function(st, ns) {
- var counts = st.chunkCounts( "foo" )
+ var counts = st.chunkCounts( "foo" );
for(shard in counts){
if(counts[shard] == 0) {
@@ -258,6 +258,6 @@ var runTest = function() {
extraShards.forEach(function(sh) {
MongoRunner.stopMongod(sh);
});
-}
+};
runTest();
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index a580be909e2..5bfacd59dfc 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -2,28 +2,28 @@
// Tests that only a correct major-version is needed to connect to a shard via mongos
//
-var st = new ShardingTest({ shards : 1, mongos : 2 })
-st.stopBalancer()
+var st = new ShardingTest({ shards : 1, mongos : 2 });
+st.stopBalancer();
-var mongos = st.s0
-var staleMongos = st.s1
-var admin = mongos.getDB( "admin" )
-var config = mongos.getDB( "config" )
-var coll = mongos.getCollection( "foo.bar" )
+var mongos = st.s0;
+var staleMongos = st.s1;
+var admin = mongos.getDB( "admin" );
+var config = mongos.getDB( "config" );
+var coll = mongos.getCollection( "foo.bar" );
// Shard collection
-printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) )
-printjson( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }) )
+printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
+printjson( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }) );
// Make sure our stale mongos is up-to-date with no splits
-staleMongos.getCollection( coll + "" ).findOne()
+staleMongos.getCollection( coll + "" ).findOne();
// Run one split
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) )
+printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
// Make sure our stale mongos is not up-to-date with the split
-printjson( admin.runCommand({ getShardVersion : coll + "" }) )
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) )
+printjson( admin.runCommand({ getShardVersion : coll + "" }) );
+printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
// Compare strings b/c timestamp comparison is a bit weird
assert.eq( Timestamp( 1, 2 ),
@@ -32,20 +32,20 @@ assert.eq( Timestamp( 1, 0 ),
staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
// See if our stale mongos is required to catch up to run a findOne on an existing connection
-staleMongos.getCollection( coll + "" ).findOne()
+staleMongos.getCollection( coll + "" ).findOne();
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) )
+printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
assert.eq( Timestamp( 1, 0 ),
staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
// See if our stale mongos is required to catch up to run a findOne on a new connection
-staleMongos = new Mongo( staleMongos.host )
-staleMongos.getCollection( coll + "" ).findOne()
+staleMongos = new Mongo( staleMongos.host );
+staleMongos.getCollection( coll + "" ).findOne();
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) )
+printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
assert.eq( Timestamp( 1, 0 ),
staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
-st.stop()
\ No newline at end of file
+st.stop();
\ No newline at end of file
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index 6fedd51b919..ff1c76a3534 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -4,16 +4,16 @@ var verifyOutput = function(out) {
assert.eq(out.counts.emit, 51200, "emit count is wrong");
assert.gt(out.counts.reduce, 99, "reduce count is wrong");
assert.eq(out.counts.output, 512, "output count is wrong");
-}
+};
var st = new ShardingTest({ shards : 2,
verbose : 1,
mongos : 1,
other : { chunkSize: 1, enableBalancer: true }});
-st.adminCommand( { enablesharding : "mrShard" } )
+st.adminCommand( { enablesharding : "mrShard" } );
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } )
+st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
var db = st.getDB( "mrShard" );
@@ -26,7 +26,7 @@ for (j = 0; j < 100; j++) {
assert.writeOK(bulk.execute());
function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values) }
+function reduce(key, values) { return Array.sum(values); }
// sharded src
var suffix = "InSharded";
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 39fde3234e0..1cfce046732 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -4,16 +4,16 @@ var verifyOutput = function(out) {
assert.eq(out.counts.emit, 51200, "emit count is wrong");
assert.gt(out.counts.reduce, 99, "reduce count is wrong");
assert.eq(out.counts.output, 512, "output count is wrong");
-}
+};
var st = new ShardingTest({ shards : 2,
verbose : 1,
mongos : 1,
other : { chunkSize: 1, enableBalancer: true }});
-st.adminCommand( { enablesharding : "mrShard" } )
+st.adminCommand( { enablesharding : "mrShard" } );
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } )
+st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
var db = st.getDB( "mrShard" );
@@ -26,7 +26,7 @@ for (j = 0; j < 100; j++) {
assert.writeOK(bulk.execute());
function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values) }
+function reduce(key, values) { return Array.sum(values); }
// sharded src sharded dst
var suffix = "InShardedOutSharded";
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index 29b47dcbbbd..4e36335047b 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -4,16 +4,16 @@ var verifyOutput = function(out) {
assert.eq(out.counts.emit, 51200, "emit count is wrong");
assert.gt(out.counts.reduce, 99, "reduce count is wrong");
assert.eq(out.counts.output, 512, "output count is wrong");
-}
+};
var st = new ShardingTest({ shards : 2,
verbose : 1,
mongos : 1,
other : { chunkSize: 1, enableBalancer: true }});
-st.adminCommand( { enablesharding : "mrShard" } )
+st.adminCommand( { enablesharding : "mrShard" } );
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } )
+st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
var db = st.getDB( "mrShard" );
@@ -26,7 +26,7 @@ for (j = 0; j < 100; j++) {
assert.writeOK(bulk.execute());
function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values) }
+function reduce(key, values) { return Array.sum(values); }
// non-sharded in/out
var suffix = "";
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index 62ad05eb7b7..5ab50c4c877 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -4,16 +4,16 @@ var verifyOutput = function(out) {
assert.eq(out.counts.emit, 51200, "emit count is wrong");
assert.gt(out.counts.reduce, 99, "reduce count is wrong");
assert.eq(out.counts.output, 512, "output count is wrong");
-}
+};
var st = new ShardingTest({ shards : 2,
verbose : 1,
mongos : 1,
other : { chunkSize: 1, enableBalancer: true }});
-st.adminCommand( { enablesharding : "mrShard" } )
+st.adminCommand( { enablesharding : "mrShard" } );
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } )
+st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
var db = st.getDB( "mrShard" );
@@ -26,7 +26,7 @@ for (j = 0; j < 100; j++) {
assert.writeOK(bulk.execute());
function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values) }
+function reduce(key, values) { return Array.sum(values); }
// non sharded src sharded dst
var suffix = "OutSharded";
diff --git a/jstests/sharding/map_reduce_validation.js b/jstests/sharding/map_reduce_validation.js
index 9fe08edd91a..436ff395ece 100644
--- a/jstests/sharding/map_reduce_validation.js
+++ b/jstests/sharding/map_reduce_validation.js
@@ -13,17 +13,17 @@ assert.commandFailed(testDB.runCommand({ mapReduce: 'user',
testDB.bar.insert({i: 1});
assert.commandFailed(testDB.runCommand({ mapReduce: 'bar',
map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values)},
+ reduce: function(key, values) {return Array.sum(values);},
out: { replace: "foo", db: "admin" }}));
assert.commandFailed(testDB.runCommand({ mapReduce: 'bar',
map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values)},
+ reduce: function(key, values) {return Array.sum(values);},
out: { replace: "foo", db: "config" }}));
assert.commandWorked(testDB.runCommand({ mapReduce: 'bar',
map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values)},
+ reduce: function(key, values) {return Array.sum(values);},
out: { replace: "foo", db: "test" }}));
st.stop();
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 02a6d215c58..1a000ea3dde 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -25,7 +25,7 @@ var configureMaxTimeAlwaysTimeOut = function(mode) {
{configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
assert.commandWorked(shards[1].getDB("admin").runCommand(
{configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
-}
+};
// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which prohibits mongod
// from enforcing time limits. See fail point declaration for complete description.
@@ -34,7 +34,7 @@ var configureMaxTimeNeverTimeOut = function(mode) {
{configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
assert.commandWorked(shards[1].getDB("admin").runCommand(
{configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
-}
+};
//
// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
@@ -70,7 +70,7 @@ assert.eq(50, shards[1].getCollection(coll.getFullName()).count());
configureMaxTimeAlwaysTimeOut("alwaysOn");
cursor = coll.find();
cursor.maxTimeMS(60*1000);
-assert.throws(function() { cursor.next() },
+assert.throws(function() { cursor.next(); },
[],
"expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
@@ -78,7 +78,7 @@ assert.throws(function() { cursor.next() },
configureMaxTimeAlwaysTimeOut("off");
cursor = coll.find();
cursor.maxTimeMS(60*1000);
-assert.doesNotThrow(function() { cursor.next() },
+assert.doesNotThrow(function() { cursor.next(); },
[],
"expected query to not hit time limit in mongod");
diff --git a/jstests/sharding/merge_chunks_basic.js b/jstests/sharding/merge_chunks_basic.js
index 58584f5a726..b8ad0040182 100644
--- a/jstests/sharding/merge_chunks_basic.js
+++ b/jstests/sharding/merge_chunks_basic.js
@@ -61,4 +61,4 @@ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a:
st.stop();
-})()
+})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 534c8476565..81961d9c2f5 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -9,12 +9,12 @@ s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
-db = s.getDB( "test" )
-coll = db.foo
+db = s.getDB( "test" );
+coll = db.foo;
-big = ""
+big = "";
while ( big.length < 10000 )
- big += "eliot"
+ big += "eliot";
var bulk = coll.initializeUnorderedBulkOp();
for ( x=0; x<100; x++ ) {
@@ -22,27 +22,27 @@ for ( x=0; x<100; x++ ) {
}
assert.writeOK(bulk.execute());
-db.printShardingStatus()
+db.printShardingStatus();
-s.adminCommand( { split : "test.foo" , middle : { x : 30 } } )
-s.adminCommand( { split : "test.foo" , middle : { x : 66 } } )
-s.adminCommand( { movechunk : "test.foo" , find : { x : 90 } , to : s.getOther( s.getServer( "test" ) ).name } )
+s.adminCommand( { split : "test.foo" , middle : { x : 30 } } );
+s.adminCommand( { split : "test.foo" , middle : { x : 66 } } );
+s.adminCommand( { movechunk : "test.foo" , find : { x : 90 } , to : s.getOther( s.getServer( "test" ) ).name } );
-db.printShardingStatus()
+db.printShardingStatus();
-print( "YO : " + s.getServer( "test" ).host )
-direct = new Mongo( s.getServer( "test" ).host )
-print( "direct : " + direct )
+print( "YO : " + s.getServer( "test" ).host );
+direct = new Mongo( s.getServer( "test" ).host );
+print( "direct : " + direct );
-directDB = direct.getDB( "test" )
+directDB = direct.getDB( "test" );
for ( done=0; done<2*1024*1024; done+=big.length ){
assert.writeOK(directDB.foo.insert( { x : 50 + Math.random() , big : big } ));
}
-db.printShardingStatus()
+db.printShardingStatus();
-assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { x : 50 } , to : s.getOther( s.getServer( "test" ) ).name } ); } , [] , "move should fail" )
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { x : 50 } , to : s.getOther( s.getServer( "test" ) ).name } ); } , [] , "move should fail" );
for ( i=0; i<20; i+= 2 ) {
try {
@@ -55,11 +55,11 @@ for ( i=0; i<20; i+= 2 ) {
}
}
-db.printShardingStatus()
+db.printShardingStatus();
s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true );
-assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 )
+assert.soon( function(){ var x = s.chunkDiff( "foo" , "test" ); print( "chunk diff: " + x ); return x < 2; } , "no balance happened" , 8 * 60 * 1000 , 2000 );
assert.soon( function(){ return !s.isAnyBalanceInFlight(); } );
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index a46614a3699..5512eb883db 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -7,19 +7,19 @@ var mongos = st.s;
var admin = mongos.getDB("admin");
db = mongos.getDB("test");
-var coll = db.getCollection("stuff")
+var coll = db.getCollection("stuff");
assert.commandWorked(admin.runCommand({ enablesharding : coll.getDB().getName() }));
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-var data = "x"
-var nsq = 16
-var n = 255
+var data = "x";
+var nsq = 16;
+var n = 255;
-for( var i = 0; i < nsq; i++ ) data += data
+for( var i = 0; i < nsq; i++ ) data += data;
-dataObj = {}
-for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data
+dataObj = {};
+for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data;
var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < 40; i++ ) {
@@ -29,11 +29,11 @@ assert.writeOK(bulk.execute());
assert.eq( 40 , coll.count() , "prep1" );
-printjson( coll.stats() )
+printjson( coll.stats() );
-admin.printShardingStatus()
+admin.printShardingStatus();
-admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } })
+admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } });
assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index 1be4a8324cc..c9143aac67c 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -163,4 +163,4 @@ assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient sh
jsTest.log('DONE!');
st.stop();
-})()
+})();
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index 621b2de9b42..f518530c866 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -149,4 +149,4 @@ assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during m
jsTest.log('DONE!');
st.stop();
-})()
+})();
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 5e4ab46a371..9313ee8bccd 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -5,27 +5,27 @@ var st = new ShardingTest({ name: "mongos_no_detect_sharding",
shards: 1,
mongos: 2 });
-var mongos = st.s
-var config = mongos.getDB("config")
+var mongos = st.s;
+var config = mongos.getDB("config");
-print( "Creating unsharded connection..." )
+print( "Creating unsharded connection..." );
-var mongos2 = st._mongos[1]
+var mongos2 = st._mongos[1];
-var coll = mongos2.getCollection( "test.foo" )
-coll.insert({ i : 0 })
+var coll = mongos2.getCollection( "test.foo" );
+coll.insert({ i : 0 });
-print( "Sharding collection..." )
+print( "Sharding collection..." );
-var admin = mongos.getDB("admin")
+var admin = mongos.getDB("admin");
-assert.eq( coll.getShardVersion().ok, 0 )
+assert.eq( coll.getShardVersion().ok, 0 );
-admin.runCommand({ enableSharding : "test" })
-admin.runCommand({ shardCollection : "test.foo", key : { _id : 1 } })
+admin.runCommand({ enableSharding : "test" });
+admin.runCommand({ shardCollection : "test.foo", key : { _id : 1 } });
-print( "Seeing if data gets inserted unsharded..." )
-print( "No splits occur here!" )
+print( "Seeing if data gets inserted unsharded..." );
+print( "No splits occur here!" );
// Insert a bunch of data which should trigger a split
var bulk = coll.initializeUnorderedBulkOp();
@@ -34,10 +34,10 @@ for( var i = 0; i < 100; i++ ){
}
assert.writeOK(bulk.execute());
-config.printShardingStatus( true )
+config.printShardingStatus( true );
-assert.eq( coll.getShardVersion().ok, 1 )
-assert.eq( 101, coll.find().itcount() )
+assert.eq( coll.getShardVersion().ok, 1 );
+assert.eq( 101, coll.find().itcount() );
st.stop();
diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js
index 4faff61698d..ef057c04ef2 100644
--- a/jstests/sharding/mongos_validate_backoff.js
+++ b/jstests/sharding/mongos_validate_backoff.js
@@ -22,7 +22,7 @@ var timeBadInsert = function() {
var end = new Date().getTime();
return end - start;
-}
+};
// We need to work at least twice in order to check resetting the counter
var successNeeded = 2;
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index d54349f8d6c..b0843cd5cdb 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -5,38 +5,38 @@
//
var st = new ShardingTest({ shards : 2, mongos : 3, other : { shardOptions : { verbose : 2 } } });
-st.stopBalancer()
+st.stopBalancer();
-var mongos = st.s0
-var staleMongosA = st.s1
-var staleMongosB = st.s2
+var mongos = st.s0;
+var staleMongosA = st.s1;
+var staleMongosB = st.s2;
// Additional logging
-printjson( mongos.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) )
-printjson( staleMongosA.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) )
-printjson( staleMongosB.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) )
-printjson( st._connections[0].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) )
-printjson( st._connections[1].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) )
-
-var admin = mongos.getDB( "admin" )
-var config = mongos.getDB( "config" )
-var coll = mongos.getCollection( "foo.bar" )
-var staleCollA = staleMongosA.getCollection( coll + "" )
-var staleCollB = staleMongosB.getCollection( coll + "" )
-
-printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) )
+printjson( mongos.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
+printjson( staleMongosA.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
+printjson( staleMongosB.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
+printjson( st._connections[0].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
+printjson( st._connections[1].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
+
+var admin = mongos.getDB( "admin" );
+var config = mongos.getDB( "config" );
+var coll = mongos.getCollection( "foo.bar" );
+var staleCollA = staleMongosA.getCollection( coll + "" );
+var staleCollB = staleMongosB.getCollection( coll + "" );
+
+printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-coll.ensureIndex({ a : 1 })
-printjson( admin.runCommand({ shardCollection : coll + "", key : { a : 1 } }) )
+coll.ensureIndex({ a : 1 });
+printjson( admin.runCommand({ shardCollection : coll + "", key : { a : 1 } }) );
// Let the stale mongos see the collection state
-staleCollA.findOne()
-staleCollB.findOne()
+staleCollA.findOne();
+staleCollB.findOne();
// Change the collection sharding state
-coll.drop()
-coll.ensureIndex({ b : 1 })
-printjson( admin.runCommand({ shardCollection : coll + "", key : { b : 1 } }) )
+coll.drop();
+coll.ensureIndex({ b : 1 });
+printjson( admin.runCommand({ shardCollection : coll + "", key : { b : 1 } }) );
// Make sure that we can successfully insert, even though we have stale state
assert.writeOK(staleCollA.insert({ b : "b" }));
@@ -45,9 +45,9 @@ assert.writeOK(staleCollA.insert({ b : "b" }));
assert.writeError(staleCollB.insert({ a : "a" }));
// Change the collection sharding state
-coll.drop()
-coll.ensureIndex({ c : 1 })
-printjson( admin.runCommand({ shardCollection : coll + "", key : { c : 1 } }) )
+coll.drop();
+coll.ensureIndex({ c : 1 });
+printjson( admin.runCommand({ shardCollection : coll + "", key : { c : 1 } }) );
// Make sure we can successfully upsert, even though we have stale state
assert.writeOK(staleCollA.update({ c : "c" }, { c : "c" }, true ));
@@ -56,36 +56,36 @@ assert.writeOK(staleCollA.update({ c : "c" }, { c : "c" }, true ));
assert.writeError(staleCollB.update({ b : "b" }, { b : "b" }, true ));
// Change the collection sharding state
-coll.drop()
-coll.ensureIndex({ d : 1 })
-printjson( admin.runCommand({ shardCollection : coll + "", key : { d : 1 } }) )
+coll.drop();
+coll.ensureIndex({ d : 1 });
+printjson( admin.runCommand({ shardCollection : coll + "", key : { d : 1 } }) );
// Make sure we can successfully update, even though we have stale state
assert.writeOK(coll.insert({ d : "d" }));
assert.writeOK(staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false ));
-assert.eq( staleCollA.findOne().x, "x" )
+assert.eq( staleCollA.findOne().x, "x" );
// Make sure we unsuccessfully update with old info
assert.writeError(staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false ));
-assert.eq( staleCollB.findOne().x, "x" )
+assert.eq( staleCollB.findOne().x, "x" );
// Change the collection sharding state
-coll.drop()
-coll.ensureIndex({ e : 1 })
+coll.drop();
+coll.ensureIndex({ e : 1 });
// Deletes need to be across two shards to trigger an error - this is probably an exceptional case
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : "shard0000" }) )
-printjson( admin.runCommand({ shardCollection : coll + "", key : { e : 1 } }) )
-printjson( admin.runCommand({ split : coll + "", middle : { e : 0 } }) )
-printjson( admin.runCommand({ moveChunk : coll + "", find : { e : 0 }, to : "shard0001" }) )
+printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : "shard0000" }) );
+printjson( admin.runCommand({ shardCollection : coll + "", key : { e : 1 } }) );
+printjson( admin.runCommand({ split : coll + "", middle : { e : 0 } }) );
+printjson( admin.runCommand({ moveChunk : coll + "", find : { e : 0 }, to : "shard0001" }) );
// Make sure we can successfully remove, even though we have stale state
assert.writeOK(coll.insert({ e : "e" }));
assert.writeOK(staleCollA.remove({ e : "e" }, true));
-assert.eq( null, staleCollA.findOne() )
+assert.eq( null, staleCollA.findOne() );
// Make sure we unsuccessfully remove with old info
assert.writeError(staleCollB.remove({ d : "d" }, true ));
-st.stop()
+st.stop();
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index 242f58e5bcb..dd72dba7168 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -11,7 +11,7 @@ initDB = function( name ){
assert.eq( 3 , c.count() );
return s.getServer( name );
-}
+};
from = initDB( "test1" );
to = s.getOther( from );
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index aaf15b827f4..35703b6baa6 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -93,4 +93,4 @@ testNotHashed({a:1, b:1});
st.stop();
-})()
+})();
diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js
index 5902fd8e63f..5e4a9f06a62 100644
--- a/jstests/sharding/move_primary_basic.js
+++ b/jstests/sharding/move_primary_basic.js
@@ -57,4 +57,4 @@ assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).prima
st.stop();
-})()
+})();
diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js
index d2ea5e31e62..e8821be922b 100644
--- a/jstests/sharding/movechunk_include.js
+++ b/jstests/sharding/movechunk_include.js
@@ -25,11 +25,11 @@ function setupMoveChunkTest(st) {
}
assert.writeOK(bulk.execute());
- var stats = st.chunkCounts( "foo" )
- var to = ""
+ var stats = st.chunkCounts( "foo" );
+ var to = "";
for ( shard in stats ){
if ( stats[shard] == 0 ) {
- to = shard
+ to = shard;
break;
}
}
@@ -37,5 +37,5 @@ function setupMoveChunkTest(st) {
find : { _id : 1 } ,
to : to ,
_waitForDelete : true} ); //some tests need this...
- assert(result, "movechunk failed: " + tojson( result ) )
+ assert(result, "movechunk failed: " + tojson( result ) );
}
diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js
index 45626a74b97..250816a15b8 100644
--- a/jstests/sharding/movechunk_with_default_paranoia.js
+++ b/jstests/sharding/movechunk_with_default_paranoia.js
@@ -3,13 +3,13 @@
* be created).
*/
var st = new ShardingTest( { shards:2, mongos:1 , other : { chunkSize: 1 }});
-load("jstests/sharding/movechunk_include.js")
+load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
for(i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk")}).length
- assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath))
+ var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
-st.stop()
+st.stop();
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index a4d3aa51d55..f643e3aae0a 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -7,17 +7,17 @@ var st = new ShardingTest( { shards: 2,
chunkSize: 1,
shardOptions: { moveParanoia:"" }}});
-load("jstests/sharding/movechunk_include.js")
+load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
var foundMoveChunk = false;
for(i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk")}).length
+ var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
foundMoveChunk = foundMoveChunk || hasMoveChunkDir;
}
-assert(foundMoveChunk, "did not find moveChunk directory!")
+assert(foundMoveChunk, "did not find moveChunk directory!");
-st.stop()
+st.stop();
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 31a6f7298c1..0e2f6bc2248 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -7,13 +7,13 @@ var st = new ShardingTest( { shards: 2,
chunkSize: 1,
shardOptions: { noMoveParanoia:"" }}});
-load("jstests/sharding/movechunk_include.js")
+load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
for(i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk")}).length
- assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath))
+ var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
-st.stop()
+st.stop();
diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js
index 6f61cbdf503..9b608279c77 100644
--- a/jstests/sharding/moveprimary_ignore_sharded.js
+++ b/jstests/sharding/moveprimary_ignore_sharded.js
@@ -1,17 +1,17 @@
// Checks that movePrimary doesn't move collections detected as sharded when it begins moving
-var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 1 })
+var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 1 });
// Stop balancer, otherwise mongosB may load information about the database non-deterministically
st.stopBalancer();
-var mongosA = st.s0
-var mongosB = st.s1
+var mongosA = st.s0;
+var mongosB = st.s1;
-var adminA = mongosA.getDB( "admin" )
-var adminB = mongosB.getDB( "admin" )
+var adminA = mongosA.getDB( "admin" );
+var adminB = mongosB.getDB( "admin" );
-var configA = mongosA.getDB( "config" )
-var configB = mongosB.getDB( "config" )
+var configA = mongosA.getDB( "config" );
+var configB = mongosB.getDB( "config" );
// Populate some data
assert.writeOK(mongosA.getCollection("foo.coll0").insert({ hello : "world" }));
@@ -37,18 +37,18 @@ printjson( adminA.runCommand({ shardCollection : "bar.coll1", key : { _id : 1 }
printjson( adminA.runCommand({ shardCollection : "bar.coll2", key : { _id : 1 } }) );
// All collections are now on primary shard
-var fooPrimaryShard = configA.databases.findOne({ _id : "foo" }).primary
-var barPrimaryShard = configA.databases.findOne({ _id : "bar" }).primary
+var fooPrimaryShard = configA.databases.findOne({ _id : "foo" }).primary;
+var barPrimaryShard = configA.databases.findOne({ _id : "bar" }).primary;
-var shards = configA.shards.find().toArray()
-var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1]
-var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0]
-var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1]
-var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0]
+var shards = configA.shards.find().toArray();
+var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1];
+var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
+var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1];
+var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
st.printShardingStatus();
-jsTest.log( "Running movePrimary for foo through mongosA ..." )
+jsTest.log( "Running movePrimary for foo through mongosA ..." );
// MongosA should already know about all the collection states
printjson( adminA.runCommand({ movePrimary : "foo", to : fooOtherShard._id }) );
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index c8ea6d490ad..a59367f158d 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -61,7 +61,7 @@ adminDb = authenticatedConn.getDB("admin");
var configDb = authenticatedConn.getDB("config");
-var inputDb = authenticatedConn.getDB("input")
+var inputDb = authenticatedConn.getDB("input");
inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
var outputDb = authenticatedConn.getDB("output");
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index 3b39f6b05e3..0167a23554d 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -1,6 +1,6 @@
// Test that map reduce and aggregate properly handle shard versioning.
(function() {
-"use strict"
+"use strict";
var st = new ShardingTest({shards: 2, mongos: 3});
@@ -34,14 +34,14 @@ st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: num
jsTest.log("Doing mapReduce");
-var map = function(){ emit( this.key, this.value ) }
+var map = function(){ emit( this.key, this.value ); };
var reduce = function(k, values){
var total = 0;
for(var i = 0; i < values.length; i++) {
total += values[i];
}
return total;
-}
+};
function validateOutput(output) {
assert.eq(output.length, numKeys, tojson(output));
for(var i = 0; i < output.length; i++) {
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 3ecebe0b7a5..481feb7f268 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -4,13 +4,13 @@
var st = new ShardingTest({ shards : 2, mongos : 1 });
//Stop balancer, since it'll just get in the way of these
-st.stopBalancer()
+st.stopBalancer();
-var coll = st.s.getCollection( jsTest.name() + ".coll" )
+var coll = st.s.getCollection( jsTest.name() + ".coll" );
-var numDocs = 50000
-var numKeys = 1000
-var numTests = 3
+var numDocs = 50000;
+var numKeys = 1000;
+var numTests = 3;
var bulk = coll.initializeUnorderedBulkOp();
for( var i = 0; i < numDocs; i++ ){
@@ -18,24 +18,24 @@ for( var i = 0; i < numDocs; i++ ){
}
assert.writeOK(bulk.execute());
-assert.eq( numDocs, coll.find().itcount() )
+assert.eq( numDocs, coll.find().itcount() );
-var halfId = coll.find().itcount() / 2
+var halfId = coll.find().itcount() / 2;
// Shard collection in half
-st.shardColl( coll, { _id : 1 }, { _id : halfId } )
+st.shardColl( coll, { _id : 1 }, { _id : halfId } );
-st.printShardingStatus()
+st.printShardingStatus();
-jsTest.log( "Collection now initialized with keys and values..." )
+jsTest.log( "Collection now initialized with keys and values..." );
-jsTest.log( "Starting migrations..." )
+jsTest.log( "Starting migrations..." );
-var migrateOp = { op : "command", ns : "admin", command : { moveChunk : "" + coll } }
+var migrateOp = { op : "command", ns : "admin", command : { moveChunk : "" + coll } };
-var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ) }
+var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ); };
-var ops = {}
+var ops = {};
for( var i = 0; i < st._connections.length; i++ ){
for( var j = 0; j < 2; j++ ){
ops[ "" + (i * 2 + j) ] = { op : "command", ns : "admin",
@@ -49,40 +49,40 @@ for( var i = 0; i < st._connections.length; i++ ){
var bid = benchStart({ ops : ops,
host : st.s.host,
parallel : 1,
- handleErrors : false })
+ handleErrors : false });
-jsTest.log( "Starting m/r..." )
+jsTest.log( "Starting m/r..." );
-var map = function(){ emit( this.key, this.value ) }
+var map = function(){ emit( this.key, this.value ); };
var reduce = function(k, values){
- var total = 0
- for( var i = 0; i < values.length; i++ ) total += values[i]
- return total
-}
+ var total = 0;
+ for( var i = 0; i < values.length; i++ ) total += values[i];
+ return total;
+};
-var outputColl = st.s.getCollection( jsTest.name() + ".mrOutput" )
+var outputColl = st.s.getCollection( jsTest.name() + ".mrOutput" );
-jsTest.log( "Output coll : " + outputColl )
+jsTest.log( "Output coll : " + outputColl );
for( var t = 0; t < numTests; t++ ){
- var results = coll.mapReduce( map, reduce, { out : { replace : outputColl.getName() } })
+ var results = coll.mapReduce( map, reduce, { out : { replace : outputColl.getName() } });
// Assert that the results are actually correct, all keys have values of (numDocs / numKeys) x key
- var output = outputColl.find().sort({ _id : 1 }).toArray()
+ var output = outputColl.find().sort({ _id : 1 }).toArray();
// printjson( output )
- assert.eq( output.length, numKeys )
- printjson( output )
+ assert.eq( output.length, numKeys );
+ printjson( output );
for( var i = 0; i < output.length; i++ )
- assert.eq( parseInt( output[i]._id ) * ( numDocs / numKeys ), output[i].value )
+ assert.eq( parseInt( output[i]._id ) * ( numDocs / numKeys ), output[i].value );
}
-jsTest.log( "Finishing parallel migrations..." )
+jsTest.log( "Finishing parallel migrations..." );
-printjson( benchFinish( bid ) )
+printjson( benchFinish( bid ) );
st.stop();
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index feb3cd41e60..c4c2362bf44 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -11,35 +11,35 @@ var collB = mB.getCollection('multidrop.coll');
jsTestLog( "Shard and split collection..." );
-var admin = mA.getDB( "admin" )
-admin.runCommand({ enableSharding : coll.getDB() + "" })
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+var admin = mA.getDB( "admin" );
+admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
for( var i = -100; i < 100; i++ ){
- admin.runCommand({ split : coll + "", middle : { _id : i } })
+ admin.runCommand({ split : coll + "", middle : { _id : i } });
}
-jsTestLog( "Create versioned connection for each mongos..." )
+jsTestLog( "Create versioned connection for each mongos..." );
-coll.find().itcount()
-collB.find().itcount()
+coll.find().itcount();
+collB.find().itcount();
-jsTestLog( "Dropping sharded collection..." )
-coll.drop()
+jsTestLog( "Dropping sharded collection..." );
+coll.drop();
-jsTestLog( "Recreating collection..." )
+jsTestLog( "Recreating collection..." );
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } })
+admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
for( var i = -10; i < 10; i++ ){
- admin.runCommand({ split : coll + "", middle : { _id : i } })
+ admin.runCommand({ split : coll + "", middle : { _id : i } });
}
-jsTestLog( "Retrying connections..." )
+jsTestLog( "Retrying connections..." );
-coll.find().itcount()
-collB.find().itcount()
+coll.find().itcount();
+collB.find().itcount();
-jsTestLog( "Done." )
+jsTestLog( "Done." );
st.stop();
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index d6dbe8cb943..41458c3b223 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -8,11 +8,11 @@ s1.adminCommand( { enablesharding : "test" } );
s1.ensurePrimaryShard('test', 'shard0001');
s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-s1.config.databases.find().forEach( printjson )
+s1.config.databases.find().forEach( printjson );
// test queries
-s1.getDB('test').existing.insert({_id:1})
+s1.getDB('test').existing.insert({_id:1});
assert.eq(1, s1.getDB('test').existing.count({_id:1}));
assert.eq(1, s2.getDB('test').existing.count({_id:1}));
@@ -27,29 +27,29 @@ assert.eq(1 , res.ok, tojson(res));
s1.startBalancer();
-printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) )
-printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) )
+printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) );
+printjson( new Mongo(s1.getServer( "test" ).name).getDB( "admin" ).adminCommand( {"getShardVersion" : "test.existing" } ) );
assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
assert.eq(1, s2.getDB('test').existing.count({_id:1}));
// test stats
-s1.getDB('test').existing2.insert({_id:1})
+s1.getDB('test').existing2.insert({_id:1});
assert.eq(1, s1.getDB('test').existing2.count({_id:1}));
assert.eq(1, s2.getDB('test').existing2.count({_id:1}));
s2.adminCommand( { shardcollection : "test.existing2" , key : { _id : 1 } } );
assert.commandWorked(s2.adminCommand({ split: "test.existing2", middle: { _id: 5 }}));
-var res = s1.getDB('test').existing2.stats()
-printjson( res )
+var res = s1.getDB('test').existing2.stats();
+printjson( res );
assert.eq(true, res.sharded); //SERVER-2828
assert.eq(true, s2.getDB('test').existing2.stats().sharded);
// test admin commands
-s1.getDB('test').existing3.insert({_id:1})
+s1.getDB('test').existing3.insert({_id:1});
assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 712e3cc9dd1..691c3c4a3c1 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -10,9 +10,9 @@ s1.adminCommand( { enablesharding : "test" } );
s1.ensurePrimaryShard('test', 'shard0001');
s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-s1.config.databases.find().forEach( printjson )
+s1.config.databases.find().forEach( printjson );
-s1.getDB('test').existing.insert({_id:1})
+s1.getDB('test').existing.insert({_id:1});
assert.eq(1, s1.getDB('test').existing.count({_id:1}));
assert.eq(1, s2.getDB('test').existing.count({_id:1}));
@@ -24,7 +24,7 @@ res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" , find : { _i
assert.eq(1 , res.ok, tojson(res));
-s1.adminCommand( { flushRouterConfig : 1 } )
+s1.adminCommand( { flushRouterConfig : 1 } );
assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
assert.eq(1, s2.getDB('test').existing.count({_id:1}));
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index 5b30dc436c4..28612681e46 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -6,19 +6,19 @@ var st = new ShardingTest({ name: "HostNames",
mongos: 2,
other: { rs : true } });
-var rsA = new ReplSetTest({ nodes : 2, name : "rsA" })
-var rsB = new ReplSetTest({ nodes : 2, name : "rsB" })
+var rsA = new ReplSetTest({ nodes : 2, name : "rsA" });
+var rsB = new ReplSetTest({ nodes : 2, name : "rsB" });
-rsA.startSet()
-rsB.startSet()
-rsA.initiate()
-rsB.initiate()
-rsA.getPrimary()
-rsB.getPrimary()
+rsA.startSet();
+rsB.startSet();
+rsA.initiate();
+rsB.initiate();
+rsA.getPrimary();
+rsB.getPrimary();
-var mongos = st.s
-var config = mongos.getDB("config")
-var admin = mongos.getDB("admin")
+var mongos = st.s;
+var config = mongos.getDB("config");
+var admin = mongos.getDB("admin");
assert( admin.runCommand({ addShard : rsA.getURL(), name : rsB.name }).ok );
printjson( config.shards.find().toArray() );
@@ -39,7 +39,7 @@ assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong ho
// Re-add shard
assert( admin.runCommand({ addShard : rsB.getURL(), name : rsA.name }).ok );
-printjson( config.shards.find().toArray() )
+printjson( config.shards.find().toArray() );
assert.eq(2, config.shards.count(), "Error re-adding a shard");
assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA 3");
diff --git a/jstests/sharding/no_empty_reset.js b/jstests/sharding/no_empty_reset.js
index 91207147e90..bda63ee9edc 100644
--- a/jstests/sharding/no_empty_reset.js
+++ b/jstests/sharding/no_empty_reset.js
@@ -1,34 +1,34 @@
// Tests that an empty shard can't be the cause of a chunk reset
-var st = new ShardingTest({ shards : 2, mongos : 2 })
+var st = new ShardingTest({ shards : 2, mongos : 2 });
// Don't balance since we're manually moving chunks
-st.stopBalancer()
+st.stopBalancer();
-var coll = st.s.getCollection( jsTestName() + ".coll" )
+var coll = st.s.getCollection( jsTestName() + ".coll" );
for( var i = -10; i < 10; i++ )
- coll.insert({ _id : i })
+ coll.insert({ _id : i });
-st.shardColl( coll, { _id : 1 }, { _id : 0 } )
+st.shardColl( coll, { _id : 1 }, { _id : 0 } );
-jsTestLog( "Sharded setup complete" )
+jsTestLog( "Sharded setup complete" );
-st.printShardingStatus()
+st.printShardingStatus();
-jsTestLog( "Setting initial versions for each mongos..." )
+jsTestLog( "Setting initial versions for each mongos..." );
-coll.find().itcount()
+coll.find().itcount();
-var collB = st.s1.getCollection( "" + coll )
-collB.find().itcount()
+var collB = st.s1.getCollection( "" + coll );
+collB.find().itcount();
-jsTestLog( "Migrating via first mongos..." )
+jsTestLog( "Migrating via first mongos..." );
-var fullShard = st.getShard( coll, { _id : 1 } )
-var emptyShard = st.getShard( coll, { _id : -1 } )
+var fullShard = st.getShard( coll, { _id : 1 } );
+var emptyShard = st.getShard( coll, { _id : -1 } );
-var admin = st.s.getDB( "admin" )
+var admin = st.s.getDB( "admin" );
assert.soon(
function () {
var result = admin.runCommand( { moveChunk: "" + coll,
@@ -42,22 +42,22 @@ assert.soon(
" to " + fullShard.shardName
);
-jsTestLog( "Resetting shard version via first mongos..." )
+jsTestLog( "Resetting shard version via first mongos..." );
-coll.find().itcount()
+coll.find().itcount();
-jsTestLog( "Making sure we don't insert into the wrong shard..." )
+jsTestLog( "Making sure we don't insert into the wrong shard..." );
-collB.insert({ _id : -11 })
+collB.insert({ _id : -11 });
-var emptyColl = emptyShard.getCollection( "" + coll )
+var emptyColl = emptyShard.getCollection( "" + coll );
-print( emptyColl )
-print( emptyShard )
-print( emptyShard.shardName )
-st.printShardingStatus()
+print( emptyColl );
+print( emptyShard );
+print( emptyShard.shardName );
+st.printShardingStatus();
-assert.eq( 0, emptyColl.find().itcount() )
+assert.eq( 0, emptyColl.find().itcount() );
jsTestLog("DONE!");
st.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index af0bdcc8d5b..a05cfa2d396 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -2,7 +2,7 @@
(function() {
"use strict";
-var numShards = 3
+var numShards = 3;
var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
s.adminCommand( { enablesharding : "test" } );
@@ -29,19 +29,19 @@ assert.writeOK(bulk.execute());
var doCommand = function( dbname , cmd ) {
x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
- host : db.getMongo().host , parallel : 2 , seconds : 2 } )
- printjson(x)
+ host : db.getMongo().host , parallel : 2 , seconds : 2 } );
+ printjson(x);
x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
- host : s._mongos[1].host , parallel : 2 , seconds : 2 } )
- printjson(x)
-}
+ host : s._mongos[1].host , parallel : 2 , seconds : 2 } );
+ printjson(x);
+};
-doCommand( "test" , { dbstats : 1 } )
-doCommand( "config" , { dbstats : 1 } )
+doCommand( "test" , { dbstats : 1 } );
+doCommand( "config" , { dbstats : 1 } );
-var x = s.getDB( "config" ).stats()
-assert( x.ok , tojson(x) )
-printjson(x)
+var x = s.getDB( "config" ).stats();
+assert( x.ok , tojson(x) );
+printjson(x);
-s.stop()
+s.stop();
}());
\ No newline at end of file
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index 55afb7150ed..3455699d9e0 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -86,4 +86,4 @@ st.printShardingStatus();
st.stop();
-})()
+})();
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index c83ac366ef6..1528e2a35af 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -33,14 +33,14 @@ for( i=0 ; i<100; i++){
assert.writeOK(bulk.execute());
//no usable index yet, should throw
-assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ) } )
+assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ); } );
//create usable index
assert.commandWorked(coll.ensureIndex({ num: 1, x: 1 }));
//usable index, but doc with empty 'num' value, so still should throw
assert.writeOK(coll.insert({ x: -5 }));
-assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ) } )
+assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ); } );
//remove the bad doc. now should finally succeed
assert.writeOK(coll.remove({ x: -5 }));
@@ -58,7 +58,7 @@ s.awaitBalance( coll.getName(), db.getName() );
assert.soon( function(){
print( "Waiting for migration cleanup to occur..." );
return coll.count() == coll.find().itcount();
-})
+});
s.stopBalancer();
@@ -180,7 +180,7 @@ for( i=0; i < 3; i++ ){
assert.soon( function(){
print( "Waiting for migration cleanup to occur..." );
return coll2.count() == coll2.find().itcount();
- })
+ });
// check no orphaned docs on the shards
assert.eq( 0 , shard0.getCollection( coll2 + "" ).find().itcount() );
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index ebafee9a7a2..dea4cf92258 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -15,7 +15,7 @@
};
var arrayGetNames = function(array) {
- return array.map(function(spec) { return spec.name });
+ return array.map(function(spec) { return spec.name; });
};
var cursorGetCollectionNames = function(cursor) {
@@ -40,7 +40,7 @@
} catch(err) {
assert.eq(err.code, ErrorCodes.NamespaceNotFound);
}
- }
+ };
/**
* Sets up the test database with with several sharded collections.
@@ -56,14 +56,14 @@
var testDB = st.s.getDB("test");
assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- var testNamespaces = testCollNames.map(function(e) { return testDB.getName() + "." + e });
+ var testNamespaces = testCollNames.map(function(e) { return testDB.getName() + "." + e; });
for (var i = 0; i < testKeys.length; i++) {
assert.commandWorked(st.s.adminCommand({shardcollection: testNamespaces[i],
key: testKeys[i]}));
}
return testNamespaces;
- }
+ };
/**
* Test that a list collections query works on the config database. This test cannot detect
@@ -123,7 +123,7 @@
cursorArray.push(cursor.next());
assert(!cursor.hasNext());
assert.eq(arrayGetNames(sortArrayByName(cursorArray)), configChunksIndexes);
- }
+ };
/**
* Test queries over the collections collection of the config database.
@@ -232,15 +232,15 @@
if (this.ns == "test2.testColl") {
emit(this.shard, 1);
}
- }
+ };
var reduceFunction = function(key, values) {
return {chunks: values.length};
- }
+ };
result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
assert.eq(sortArrayById(result.results), [{_id: shard1, value: {chunks: 2}},
{_id: shard2, value: {chunks: 3}}]);
- }
+ };
/**
* Test queries over a user created collection of an arbitrary database on the config servers.
@@ -253,7 +253,7 @@
{_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
{_id: 5, g: 2, c: 18, s: "d", u: [3]},
{_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
- {_id: 7, g: 3, c: 2, s: "f", u: [1]}]
+ {_id: 7, g: 3, c: 2, s: "f", u: [1]}];
var userCollIndexes = ["_id_", "s_1"];
var cursor;
var cursorArray;
@@ -328,10 +328,10 @@
// Map reduce query.
var mapFunction = function() {
emit(this.g, 1);
- }
+ };
var reduceFunction = function(key, values) {
return {count: values.length};
- }
+ };
result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
assert.eq(sortArrayById(result.results), [{_id: 1, value: {count: 2}},
@@ -339,7 +339,7 @@
{_id: 3, value: {count: 2}}]);
assert(userColl.drop());
- }
+ };
var st = new ShardingTest({shards: 2, mongos: 1});
var testNamespaces = setupTestCollections(st);
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index baedcd59ba2..936f8856903 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -107,9 +107,9 @@ ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]],
print("SlaveOK Query...");
var sOKCount = collSOk.find().itcount();
-var collCount = null
+var collCount = null;
try{
- print("Normal query...")
+ print("Normal query...");
collCount = coll.find().itcount();
}
catch(e){
@@ -117,7 +117,7 @@ catch(e){
// There may have been a stepdown caused by step 8, so we run this twice in a row. The first
// time can error out.
- print("Error may have been caused by stepdown, try again.")
+ print("Error may have been caused by stepdown, try again.");
collCount = coll.find().itcount();
}
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index e3f1dbb4970..76d02df38b9 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -7,7 +7,7 @@ seedString = function(replTest) {
removeShard = function(st, replTest) {
print( "Removing shard with name: " + replTest.name );
- res = st.admin.runCommand( { removeshard: replTest.name } )
+ res = st.admin.runCommand( { removeshard: replTest.name } );
printjson(res);
assert( res.ok , "failed to start draining shard" );
@@ -15,15 +15,15 @@ removeShard = function(st, replTest) {
res = st.admin.runCommand( { removeshard: replTest.name } );
printjson(res);
return res.ok && res.msg == 'removeshard completed successfully';
- }
+ };
assert.soon( checkRemoveShard, "failed to remove shard", 5 * 60000 );
// Need to wait for migration to be over... only works for inline deletes
checkNSLock = function() {
- printjson( st.s.getDB( "config" ).locks.find().toArray() )
+ printjson( st.s.getDB( "config" ).locks.find().toArray() );
return !st.isAnyBalanceInFlight();
- }
- assert.soon( checkNSLock, "migrations did not end?" )
+ };
+ assert.soon( checkNSLock, "migrations did not end?" );
sleep( 2000 );
@@ -44,7 +44,7 @@ addShard = function(st, replTest) {
try {
assert.eq(true, st.adminCommand({ addshard : seed }));
} catch (e) {
- print("First attempt to addShard failed, trying again")
+ print("First attempt to addShard failed, trying again");
// transport error on first attempt is expected. Make sure second attempt goes through
assert.eq(true, st.adminCommand({ addshard : seed }));
}
@@ -125,13 +125,13 @@ assert.eq( 300, coll.find().itcount() );
st.admin.printShardingStatus();
// Remove shard and add it back in, without shutting it down.
-jsTestLog( "Attempting to remove shard and add it back in" )
+jsTestLog( "Attempting to remove shard and add it back in" );
removeShard( st, rst1 );
addShard(st, rst1 );
// Remove shard, restart set, then add it back in.
-jsTestLog( "Attempting to remove shard, restart the set, and then add it back in" )
+jsTestLog( "Attempting to remove shard, restart the set, and then add it back in" );
originalSeed = seedString(rst1);
removeShard( st, rst1 );
@@ -214,6 +214,6 @@ rst1.awaitReplication();
assert.eq( originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before" );
addShard( st, rst1 );
-jsTestLog( "finishing!" )
+jsTestLog( "finishing!" );
// this should be fixed by SERVER-22176
-st.stop({ allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] })
+st.stop({ allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
diff --git a/jstests/sharding/replset_config/config_rs_no_primary.js b/jstests/sharding/replset_config/config_rs_no_primary.js
index 110320ded60..9fce3421474 100644
--- a/jstests/sharding/replset_config/config_rs_no_primary.js
+++ b/jstests/sharding/replset_config/config_rs_no_primary.js
@@ -35,11 +35,11 @@ var testOps = function(mongos) {
mongos.setSlaveOk(false);
assert.neq(null, shardDoc);
- jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos)
+ jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
assert.writeError(mongos.getDB("newDB").foo.insert({a:1}));
assert.commandFailed(mongos.getDB('admin').runCommand({shardCollection: "test.foo",
key: {a:1}}));
-}
+};
testOps(mongos2);
testOps(st.s);
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index 08debd9768a..d2519f0ae5e 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -21,7 +21,7 @@ for ( var i = 0; i < shards.length; i++) {
var collOneShard = mongos.getCollection("foo.collOneShard");
var collAllShards = mongos.getCollection("foo.collAllShards");
-printjson(admin.runCommand({enableSharding : collOneShard.getDB() + ""}))
+printjson(admin.runCommand({enableSharding : collOneShard.getDB() + ""}));
printjson(admin.runCommand({movePrimary : collOneShard.getDB() + "",
to : shards[0]._id}));
@@ -64,29 +64,29 @@ assert.eq(3, collAllShards.find().itcount());
assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
assert.eq(3, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-jsTest.log("One shard down!")
+jsTest.log("One shard down!");
-MongoRunner.stopMongod(st.shard2)
+MongoRunner.stopMongod(st.shard2);
-jsTest.log("done.")
+jsTest.log("done.");
assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
assert.eq(2, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-jsTest.log("Two shards down!")
+jsTest.log("Two shards down!");
-MongoRunner.stopMongod(st.shard1)
+MongoRunner.stopMongod(st.shard1);
-jsTest.log("done.")
+jsTest.log("done.");
assert.eq(3, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
assert.eq(1, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
-jsTest.log("All shards down!")
+jsTest.log("All shards down!");
-MongoRunner.stopMongod(st.shard0)
+MongoRunner.stopMongod(st.shard0);
-jsTest.log("done.")
+jsTest.log("done.");
assert.eq(0, collOneShard.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
assert.eq(0, collAllShards.find({}, {}, 0, 0, 0, returnPartialFlag).itcount());
diff --git a/jstests/sharding/rs_stepdown_and_pooling.js b/jstests/sharding/rs_stepdown_and_pooling.js
index c66e5037825..3cc97bfe147 100644
--- a/jstests/sharding/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/rs_stepdown_and_pooling.js
@@ -64,7 +64,7 @@ else {
if (result != null) printjson(result);
assert.eq(null, result);
- }
+ };
stepDown(primary, 0);
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index 6243007a6c3..faf852c6044 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -25,11 +25,11 @@ assert.throws( function(){ s.adminCommand({ shardCollection: 'test', key: { x: 1
assert.throws( function(){ s.adminCommand({ shardCollection: '.foo', key: { x: 1 }}); });
var cconfig = s.config.collections.findOne( { _id : "test.foo" } );
-assert( cconfig , "why no collection entry for test.foo" )
+assert( cconfig , "why no collection entry for test.foo" );
-delete cconfig.lastmod
-delete cconfig.dropped
-delete cconfig.lastmodEpoch
+delete cconfig.lastmod;
+delete cconfig.dropped;
+delete cconfig.lastmodEpoch;
assert.eq(cconfig,
{ _id : "test.foo" , key : { num : 1 } , unique : false },
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index f8c925ee3e0..b94aa260c5c 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -6,23 +6,23 @@
placeCheck = function( num ){
print("shard2 step: " + num );
-}
+};
printAll = function(){
print( "****************" );
- db.foo.find().forEach( printjsononeline )
+ db.foo.find().forEach( printjsononeline );
print( "++++++++++++++++++" );
- primary.foo.find().forEach( printjsononeline )
+ primary.foo.find().forEach( printjsononeline );
print( "++++++++++++++++++" );
- secondary.foo.find().forEach( printjsononeline )
+ secondary.foo.find().forEach( printjsononeline );
print( "---------------------" );
-}
+};
s = new ShardingTest({name: "shard2", shards: 2});
// We're doing a lot of checks here that can get screwed up by the balancer, now that
// it moves small #s of chunks too
-s.stopBalancer()
+s.stopBalancer();
db = s.getDB( "test" );
@@ -57,7 +57,7 @@ placeCheck( 2 );
// test move shard
assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : primary.getMongo().name, _waitForDelete : true } ); } );
-assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : "adasd", _waitForDelete : true } ) } );
+assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : "adasd", _waitForDelete : true } ); } );
s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : secondary.getMongo().name, _waitForDelete : true } );
assert.eq( 2 , secondary.foo.find().length() , "secondary should have 2 after move shard" );
@@ -139,18 +139,18 @@ assert.eq( "funny man" , db.foo.find( { num : { $lt : 100 } } ).sort( { num : 1
placeCheck( 7 );
-db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } )
+db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } );
zzz = db.foo.find().explain("executionStats").executionStats;
-assert.eq( 0 , zzz.totalKeysExamined , "EX1a" )
-assert.eq( 6 , zzz.nReturned , "EX1b" )
-assert.eq( 6 , zzz.totalDocsExamined , "EX1c" )
+assert.eq( 0 , zzz.totalKeysExamined , "EX1a" );
+assert.eq( 6 , zzz.nReturned , "EX1b" );
+assert.eq( 6 , zzz.totalDocsExamined , "EX1c" );
zzz = db.foo.find().hint( { _id : 1 } ).sort( { _id : 1 } )
.explain("executionStats").executionStats;
-assert.eq( 6 , zzz.totalKeysExamined , "EX2a" )
-assert.eq( 6 , zzz.nReturned , "EX2b" )
-assert.eq( 6 , zzz.totalDocsExamined , "EX2c" )
+assert.eq( 6 , zzz.totalKeysExamined , "EX2a" );
+assert.eq( 6 , zzz.nReturned , "EX2b" );
+assert.eq( 6 , zzz.totalDocsExamined , "EX2c" );
// getMore
assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index abae4f19eb7..f6bc9c50514 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -7,7 +7,7 @@ var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: { enableB
s2 = s._mongos[1];
-db = s.getDB( "test" )
+db = s.getDB( "test" );
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
@@ -16,15 +16,15 @@ if (s.configRS) {
s.configRS.awaitLastOpCommitted();
}
-assert( sh.getBalancerState() , "A1" )
+assert( sh.getBalancerState() , "A1" );
sh.setBalancerState(false);
-assert( ! sh.getBalancerState() , "A2" )
+assert( ! sh.getBalancerState() , "A2" );
sh.setBalancerState(true);
-assert( sh.getBalancerState() , "A3" )
+assert( sh.getBalancerState() , "A3" );
sh.setBalancerState(false);
-assert( ! sh.getBalancerState() , "A4" )
+assert( ! sh.getBalancerState() , "A4" );
-s.config.databases.find().forEach( printjson )
+s.config.databases.find().forEach( printjson );
a = s.getDB( "test" ).foo;
b = s2.getDB( "test" ).foo;
@@ -39,8 +39,8 @@ a.save( { num : 3 } );
assert.eq( 3 , a.find().toArray().length , "normal A" );
assert.eq( 3 , b.find().toArray().length , "other A" );
-assert.eq( 3 , primary.count() , "p1" )
-assert.eq( 0 , secondary.count() , "s1" )
+assert.eq( 3 , primary.count() , "p1" );
+assert.eq( 0 , secondary.count() , "s1" );
assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
@@ -49,7 +49,7 @@ s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther(
assert( primary.find().toArray().length > 0 , "blah 1" );
assert( secondary.find().toArray().length > 0 , "blah 2" );
-assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3" )
+assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3" );
assert.eq( 3 , a.find().toArray().length , "normal B" );
assert.eq( 3 , b.find().toArray().length , "other B" );
@@ -68,24 +68,24 @@ function doCounts( name , total , onlyItCounts ){
return total;
}
-var total = doCounts( "before wrong save" )
+var total = doCounts( "before wrong save" );
assert.writeOK(secondary.insert( { _id : 111 , num : -3 } ));
-doCounts( "after wrong save" , total , true )
+doCounts( "after wrong save" , total , true );
e = a.find().explain("executionStats").executionStats;
-assert.eq( 3 , e.nReturned , "ex1" )
-assert.eq( 0 , e.totalKeysExamined , "ex2" )
-assert.eq( 4 , e.totalDocsExamined , "ex3" )
+assert.eq( 3 , e.nReturned , "ex1" );
+assert.eq( 0 , e.totalKeysExamined , "ex2" );
+assert.eq( 4 , e.totalDocsExamined , "ex3" );
var chunkSkips = 0;
for (var shard in e.executionStages.shards) {
var theShard = e.executionStages.shards[shard];
chunkSkips += getChunkSkips(theShard.executionStages);
}
-assert.eq( 1 , chunkSkips , "ex4" )
+assert.eq( 1 , chunkSkips , "ex4" );
// SERVER-4612
// make sure idhack obeys chunks
-x = a.findOne( { _id : 111 } )
+x = a.findOne( { _id : 111 } );
assert( ! x , "idhack didn't obey chunk boundaries " + tojson(x) );
// --- move all to 1 ---
@@ -94,20 +94,20 @@ print( "MOVE ALL TO 1" );
assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
s.printCollectionInfo( "test.foo" );
-assert( a.findOne( { num : 1 } ) )
-assert( b.findOne( { num : 1 } ) )
+assert( a.findOne( { num : 1 } ) );
+assert( b.findOne( { num : 1 } ) );
print( "GOING TO MOVE" );
-assert( a.findOne( { num : 1 } ) , "pre move 1" )
+assert( a.findOne( { num : 1 } ) , "pre move 1" );
s.printCollectionInfo( "test.foo" );
-myto = s.getOther( s.getServer( "test" ) ).name
+myto = s.getOther( s.getServer( "test" ) ).name;
print( "counts before move: " + tojson( s.shardCounts( "foo" ) ) );
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : myto, _waitForDelete : true } )
+s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : myto, _waitForDelete : true } );
print( "counts after move: " + tojson( s.shardCounts( "foo" ) ) );
s.printCollectionInfo( "test.foo" );
assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shard again" );
-assert( a.findOne( { num : 1 } ) , "post move 1" )
-assert( b.findOne( { num : 1 } ) , "post move 2" )
+assert( a.findOne( { num : 1 } ) , "post move 1" );
+assert( b.findOne( { num : 1 } ) , "post move 2" );
print( "*** drop" );
@@ -115,17 +115,17 @@ s.printCollectionInfo( "test.foo" , "before drop" );
a.drop();
s.printCollectionInfo( "test.foo" , "after drop" );
-assert.eq( 0 , a.count() , "a count after drop" )
-assert.eq( 0 , b.count() , "b count after drop" )
+assert.eq( 0 , a.count() , "a count after drop" );
+assert.eq( 0 , b.count() , "b count after drop" );
s.printCollectionInfo( "test.foo" , "after counts" );
-assert.eq( 0 , primary.count() , "p count after drop" )
-assert.eq( 0 , secondary.count() , "s count after drop" )
+assert.eq( 0 , primary.count() , "p count after drop" );
+assert.eq( 0 , secondary.count() , "s count after drop" );
-print( "*** dropDatabase setup" )
+print( "*** dropDatabase setup" );
-s.printShardingStatus()
+s.printShardingStatus();
s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
a.save( { num : 2 } );
a.save( { num : 3 } );
@@ -134,10 +134,10 @@ s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther(
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase setup" );
-doCounts( "after dropDatabase setup2" )
+doCounts( "after dropDatabase setup2" );
s.printCollectionInfo( "test.foo" , "after dropDatabase setup3" );
-print( "*** ready to call dropDatabase" )
+print( "*** ready to call dropDatabase" );
res = s.getDB( "test" ).dropDatabase();
assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
// Waiting for SERVER-2253
@@ -145,12 +145,12 @@ assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' wa
s.printShardingStatus();
s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
-assert.eq( 0 , doCounts( "after dropDatabase called" ) )
+assert.eq( 0 , doCounts( "after dropDatabase called" ) );
// ---- retry commands SERVER-1471 ----
s.adminCommand( { enablesharding : "test2" } );
-s.ensurePrimaryShard('test2', 'shard0000')
+s.ensurePrimaryShard('test2', 'shard0000');
s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
dba = s.getDB( "test2" );
dbb = s2.getDB( "test2" );
@@ -167,10 +167,10 @@ s.adminCommand( { movechunk : "test2.foo" , find : { num : 3 } , to : s.getOther
assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
-x = dba.foo.stats()
-printjson( x )
-y = dbb.foo.stats()
-printjson( y )
+x = dba.foo.stats();
+printjson( x );
+y = dbb.foo.stats();
+printjson( y );
s.stop();
diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js
index a33e4b226fc..47ba37c43db 100644
--- a/jstests/sharding/shard5.js
+++ b/jstests/sharding/shard5.js
@@ -39,7 +39,7 @@ assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
//s.adminCommand( { movechunk : "test.foo" , find : { num : 3 } , to : s.getOther( s.getServer( "test" ) ).name } );
-s.printChunks()
+s.printChunks();
print( "* A" );
@@ -50,7 +50,7 @@ s2.getDB( "test" ).foo.save( { num : 2 } );
assert.soon(
function(){
return 8 == s2.getDB( "test" ).foo.find().toArray().length;
- } , "other B 2" , 5000 , 100 )
+ } , "other B 2" , 5000 , 100 );
assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index bab1ced9d8b..843780a6fa0 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -15,19 +15,19 @@ db = s.getDB( "test" );
function poolStats( where ){
var total = 0;
var msg = "poolStats " + where + " ";
- var x = db.runCommand( "connPoolStats" ).hosts
+ var x = db.runCommand( "connPoolStats" ).hosts;
for ( var h in x ){
var z = x[h];
msg += z.created + " ";
- total += z.created
+ total += z.created;
}
printjson( x );
- print( "****\n" + msg + "\n*****" )
+ print( "****\n" + msg + "\n*****" );
summary += msg + "\n";
- return total
+ return total;
}
-poolStats( "at start" )
+poolStats( "at start" );
// we want a lot of data, so lets make a 50k string to cheat :)
bigString = "";
@@ -42,7 +42,7 @@ for ( ; num<100; num++ ){
assert.eq( 100 , db.data.find().toArray().length , "basic find after setup" );
-connBefore = poolStats( "setup done" )
+connBefore = poolStats( "setup done" );
// limit
@@ -58,7 +58,7 @@ assert.eq( connBefore , poolStats( "limit test done" ) , "limit test conns" );
function assertOrder( start , num ){
var a = db.data.find().skip(start).limit(num).sort( { num : 1 } ).map( function(z){ return z.num; } );
- var c = []
+ var c = [];
for ( var i=0; i<num; i++ )
c.push( start + i );
assert.eq( c , a , "assertOrder start: " + start + " num: " + num );
@@ -67,16 +67,16 @@ function assertOrder( start , num ){
assertOrder( 0 , 10 );
assertOrder( 5 , 10 );
-poolStats( "after checking order" )
+poolStats( "after checking order" );
function doItCount( skip , sort , batchSize ){
var c = db.data.find();
if ( skip )
- c.skip( skip )
+ c.skip( skip );
if ( sort )
c.sort( sort );
if ( batchSize )
- c.batchSize( batchSize )
+ c.batchSize( batchSize );
return c.itcount();
}
@@ -89,25 +89,25 @@ function checkItCount( batchSize ){
assert.eq( 0 , doItCount( num + 5 , { _id : 1 } , batchSize ) , "skip 5 " + batchSize );
}
-poolStats( "before checking itcount" )
+poolStats( "before checking itcount" );
-checkItCount( 0 )
-checkItCount( 2 )
+checkItCount( 0 );
+checkItCount( 2 );
-poolStats( "after checking itcount" )
+poolStats( "after checking itcount" );
// --- test save support ---
o = db.data.findOne();
o.x = 16;
db.data.save( o );
-o = db.data.findOne( { _id : o._id } )
+o = db.data.findOne( { _id : o._id } );
assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
-poolStats( "at end" )
+poolStats( "at end" );
-print( summary )
+print( summary );
-assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ) } )
+assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ); } );
s.stop();
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index 50e5a45bddf..d83ae01a39c 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -178,4 +178,4 @@ assert.commandFailed(mongos.adminCommand({
st.stop();
-})()
+})();
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 35f8d4bd1b0..9473ae62a03 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -5,7 +5,7 @@ var s = new ShardingTest({ name: "shard_existing",
mongos: 1,
other: { chunkSize: 1 } });
-db = s.getDB( "test" )
+db = s.getDB( "test" );
var stringSize = 10000;
var numDocs = 2000;
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 58f04b5f9c8..8df1b9caa8f 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -49,7 +49,7 @@
testDB[testCollName].ensureIndex({ x : 1 });
assert.commandWorked(mongosConn.getDB('admin').
runCommand({ shardcollection : testDBName + '.' + testCollName,
- key : { x : 1 }}))
+ key : { x : 1 }}));
// Test case where GLE should return an error
testDB.foo.insert({_id:'a', x:1});
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 5702b59dc84..0524a210918 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -6,14 +6,14 @@ var s = new ShardingTest({ name: "shard_keycount",
mongos: 1,
other:{ chunkSize: 1 } });
-dbName = "test"
-collName = "foo"
-ns = dbName + "." + collName
+dbName = "test";
+collName = "foo";
+ns = dbName + "." + collName;
db = s.getDB( dbName );
for(var i = 0; i < 10; i++){
- db.foo.insert({ _id : i })
+ db.foo.insert({ _id : i });
}
// Enable sharding on DB
@@ -25,25 +25,25 @@ s.adminCommand( { shardcollection : ns, key : { _id : 1 } } );
// Split into two chunks
-s.adminCommand({ split : ns, find : { _id : 3 } })
+s.adminCommand({ split : ns, find : { _id : 3 } });
-coll = db.getCollection( collName )
+coll = db.getCollection( collName );
// Split chunk again
-s.adminCommand({ split : ns, find : { _id : 3 } })
+s.adminCommand({ split : ns, find : { _id : 3 } });
-coll.update({ _id : 3 }, { _id : 3 })
+coll.update({ _id : 3 }, { _id : 3 });
// Split chunk again
-s.adminCommand({ split : ns, find : { _id : 3 } })
+s.adminCommand({ split : ns, find : { _id : 3 } });
-coll.update({ _id : 3 }, { _id : 3 })
+coll.update({ _id : 3 }, { _id : 3 });
// Split chunk again
// FAILS since the key count is based on the full index, not the chunk itself
// i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
// in chunk with bounds _id : 0 => 5
-s.adminCommand({ split : ns, find : { _id : 3 } })
+s.adminCommand({ split : ns, find : { _id : 3 } });
s.stop();
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index 1189e4e6cf3..15105d880ff 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -17,7 +17,7 @@ var res;
//
// Shard key is the same with command name.
-s.shardColl("foo", {count: 1}, { count: "" })
+s.shardColl("foo", {count: 1}, { count: "" });
for (var i=0; i<50; i++) {
db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
@@ -40,7 +40,7 @@ assert.eq(res.n, 100);
//
db.foo.drop();
// Shard key is the same with command name.
-s.shardColl("foo", {mapReduce: 1}, { mapReduce: "" })
+s.shardColl("foo", {mapReduce: 1}, { mapReduce: "" });
for (var i=0; i<50; i++) {
db.foo.insert({mapReduce: i}); // to the chunk including number
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 6142964a35c..57df8648559 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -8,11 +8,11 @@ var s = new ShardingTest({ name: "slow_sharding_balance1",
s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
-s.config.settings.find().forEach( printjson )
+s.config.settings.find().forEach( printjson );
db = s.getDB( "test" );
-bigString = ""
+bigString = "";
while ( bigString.length < 10000 )
bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
@@ -30,7 +30,7 @@ assert.lt( 20 , s.config.chunks.count() , "setup2" );
function diff1(){
var x = s.chunkCounts( "foo" );
- printjson( x )
+ printjson( x );
return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
}
@@ -40,7 +40,7 @@ function sum(){
}
assert.lt( 20 , diff1() , "big differential here" );
-print( diff1() )
+print( diff1() );
assert.soon( function(){
var d = diff1();
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 79a979fa503..51e5765b19e 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -14,7 +14,7 @@ s.config.settings.find().forEach( printjson );
db = s.getDB( "test" );
-bigString = ""
+bigString = "";
while ( bigString.length < 10000 )
bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
@@ -32,7 +32,7 @@ assert.lt( 20 , s.config.chunks.count() , "setup2" );
function diff1(){
var x = s.chunkCounts( "foo" );
- printjson( x )
+ printjson( x );
return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
}
@@ -51,7 +51,7 @@ s.config.settings.find().forEach( printjson );
print("* B");
-print( diff1() )
+print( diff1() );
var currDiff = diff1();
var waitTime = 0;
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 8616f697ec3..3a89efce5c7 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -11,19 +11,19 @@ s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.eq( 1 , s.config.chunks.count() , "setup1" );
-s.config.settings.find().forEach( printjson )
+s.config.settings.find().forEach( printjson );
db = s.getDB( "test" );
-bigString = ""
+bigString = "";
while ( bigString.length < 10000 )
bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-N = 3000
+N = 3000;
num = 0;
-counts = {}
+counts = {};
//
// TODO: Rewrite to make much clearer.
@@ -38,10 +38,10 @@ counts = {}
function doUpdate( bulk, includeString, optionalId ){
- var up = { $inc : { x : 1 } }
+ var up = { $inc : { x : 1 } };
if ( includeString )
up["$set"] = { s : bigString };
- var myid = optionalId == undefined ? Random.randInt( N ) : optionalId
+ var myid = optionalId == undefined ? Random.randInt( N ) : optionalId;
bulk.find({ _id : myid }).upsert().update( up );
counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
@@ -62,12 +62,12 @@ for ( i=0; i<N*9; i++ ){
assert.writeOK(bulk.execute());
for ( var i=0; i<50; i++ ){
- s.printChunks( "test.foo" )
+ s.printChunks( "test.foo" );
if ( check( "initial:" + i , true ) )
break;
- sleep( 5000 )
+ sleep( 5000 );
}
-check( "initial at end" )
+check( "initial at end" );
assert.lt( 20 , s.config.chunks.count() , "setup2" );
@@ -75,7 +75,7 @@ assert.lt( 20 , s.config.chunks.count() , "setup2" );
function check( msg , dontAssert ){
for ( var x in counts ){
var e = counts[x];
- var z = db.foo.findOne( { _id : parseInt( x ) } )
+ var z = db.foo.findOne( { _id : parseInt( x ) } );
if ( z && z.x == e )
continue;
@@ -83,24 +83,24 @@ function check( msg , dontAssert ){
if ( dontAssert ){
if ( z )
delete z.s;
- print( "not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z) )
+ print( "not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z) );
return false;
}
- s.s.getDB("admin").runCommand({ setParameter : 1, logLevel : 2 })
+ s.s.getDB("admin").runCommand({ setParameter : 1, logLevel : 2 });
- printjson( db.foo.findOne( { _id : parseInt( x ) } ) )
+ printjson( db.foo.findOne( { _id : parseInt( x ) } ) );
- var y = db.foo.findOne( { _id : parseInt( x ) } )
+ var y = db.foo.findOne( { _id : parseInt( x ) } );
if ( y ){
delete y.s;
}
- s.printChunks( "test.foo" )
+ s.printChunks( "test.foo" );
- assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg )
- assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg )
+ assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg );
+ assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg );
}
return true;
@@ -108,7 +108,7 @@ function check( msg , dontAssert ){
function diff1(){
- jsTest.log("Running diff1...")
+ jsTest.log("Running diff1...");
bulk = db.foo.initializeUnorderedBulkOp();
var myid = doUpdate( bulk, false );
@@ -119,14 +119,14 @@ function diff1(){
" correct count is: " + counts[myid] +
" db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
- var x = s.chunkCounts( "foo" )
+ var x = s.chunkCounts( "foo" );
if ( Math.random() > .999 )
- printjson( x )
+ printjson( x );
return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
}
assert.lt( 20 , diff1() ,"initial load" );
-print( diff1() )
+print( diff1() );
s.startBalancer();
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 81b91eb420a..86814ed17a8 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -1,7 +1,7 @@
// SERVER-2068
(function() {
-var chunkSize = 25
+var chunkSize = 25;
var s = new ShardingTest({ name: "migrate_cursor1",
shards: 2,
@@ -9,22 +9,22 @@ var s = new ShardingTest({ name: "migrate_cursor1",
other: { chunkSize : chunkSize } });
s.adminCommand( { enablesharding : "test" } );
-db = s.getDB( "test" )
+db = s.getDB( "test" );
s.ensurePrimaryShard('test', 'shard0001');
-t = db.foo
+t = db.foo;
-bigString = ""
+bigString = "";
stringSize = 1024;
while ( bigString.length < stringSize )
bigString += "asdasdas";
-stringSize = bigString.length
-docsPerChunk = Math.ceil( ( chunkSize * 1024 * 1024 ) / ( stringSize - 12 ) )
-numChunks = 5
-numDocs = 20 * docsPerChunk
+stringSize = bigString.length;
+docsPerChunk = Math.ceil( ( chunkSize * 1024 * 1024 ) / ( stringSize - 12 ) );
+numChunks = 5;
+numDocs = 20 * docsPerChunk;
-print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs )
+print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs );
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++){
@@ -37,35 +37,35 @@ s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.lt( numChunks , s.config.chunks.find().count() , "initial 1" );
primary = s.getServer( "test" ).getDB( "test" ).foo;
-secondaryName = s.getOther( primary.name )
+secondaryName = s.getOther( primary.name );
secondary = secondaryName.getDB( "test" ).foo;
assert.eq( numDocs , primary.count() , "initial 2" );
assert.eq( 0 , secondary.count() , "initial 3" );
-assert.eq( numDocs , t.count() , "initial 4" )
+assert.eq( numDocs , t.count() , "initial 4" );
-x = primary.find( { _id : { $lt : 500 } } ).batchSize(2)
+x = primary.find( { _id : { $lt : 500 } } ).batchSize(2);
x.next(); // 1. Create an open cursor
-print("start moving chunks...")
+print("start moving chunks...");
// 2. Move chunk from s0 to s1 without waiting for deletion.
// Command returns, but the deletion on s0 will block due to the open cursor.
-s.adminCommand( { moveChunk : "test.foo" , find : { _id : 0 } , to : secondaryName.name } )
+s.adminCommand( { moveChunk : "test.foo" , find : { _id : 0 } , to : secondaryName.name } );
// 3. Start second moveChunk command from s0 to s1.
// This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause deletion on s1.
// This moveChunk will wait for deletion.
-join = startParallelShell( "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " + docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )" )
-assert.soon( function(){ return db.x.count() > 0; } , "XXX" , 30000 , 1 )
+join = startParallelShell( "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " + docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )" );
+assert.soon( function(){ return db.x.count() > 0; } , "XXX" , 30000 , 1 );
// 4. Close the cursor to enable chunk deletion.
-print( "itcount: " + x.itcount() )
+print( "itcount: " + x.itcount() );
x = null;
-for ( i=0; i<5; i++ ) gc()
+for ( i=0; i<5; i++ ) gc();
-print( "cursor should be gone" )
+print( "cursor should be gone" );
// 5. Waiting for the second moveChunk to finish its deletion.
// Note the deletion for the first moveChunk may not be finished.
@@ -74,7 +74,7 @@ join();
//assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
// 6. Check the total number of docs on both shards to make sure no doc is lost.
// Use itcount() to ignore orphan documents.
-assert.eq( numDocs , t.find().itcount() , "at end 2" )
+assert.eq( numDocs , t.find().itcount() , "at end 2" );
s.stop();
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index e47c8b60ba3..f83d744527e 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -19,7 +19,7 @@ for ( i=0; i<100; i++ ) {
assert.writeOK(bulk.execute());
assert.writeOK(bulk2.execute());
-sh.splitAt( "test.foo" , { _id : 50 } )
+sh.splitAt( "test.foo" , { _id : 50 } );
other = new Mongo( s.s.name );
dbother = other.getDB( "test" );
@@ -43,11 +43,11 @@ assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
-sh.splitAt( "test.bar" , { _id : 50 } )
+sh.splitAt( "test.bar" , { _id : 50 } );
-yetagain = new Mongo( s.s.name )
-assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x )
-assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x )
+yetagain = new Mongo( s.s.name );
+assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x );
+assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x );
assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index dfcb8f22ea4..4da40b344d1 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -46,14 +46,14 @@ s._rs.forEach(function(rsNode) {
}
});
-assert.eq( num , db.foo.find().count() , "C1" )
-assert.eq( num , db.foo.find().itcount() , "C2" )
-assert.eq( num , db.foo.find().sort( { _id : 1 } ).itcount() , "C3" )
-assert.eq( num , db.foo.find().sort( { _id : -1 } ).itcount() , "C4" )
+assert.eq( num , db.foo.find().count() , "C1" );
+assert.eq( num , db.foo.find().itcount() , "C2" );
+assert.eq( num , db.foo.find().sort( { _id : 1 } ).itcount() , "C3" );
+assert.eq( num , db.foo.find().sort( { _id : -1 } ).itcount() , "C4" );
db.foo.ensureIndex( { x : 1 } );
-assert.eq( num , db.foo.find().sort( { x : 1 } ).itcount() , "C5" )
-assert.eq( num , db.foo.find().sort( { x : -1 } ).itcount() , "C6" )
+assert.eq( num , db.foo.find().sort( { x : 1 } ).itcount() , "C5" );
+assert.eq( num , db.foo.find().sort( { x : -1 } ).itcount() , "C6" );
s.stop();
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 6ab976497c6..b60910d3fb9 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -24,14 +24,14 @@ s.ensurePrimaryShard('test', 'test-rs0');
db.foo.save({ _id: 5,x: 17 });
assert.eq(1, db.foo.count());
-s.config.databases.find().forEach(printjson)
-s.config.shards.find().forEach(printjson)
+s.config.databases.find().forEach(printjson);
+s.config.shards.find().forEach(printjson);
var serverName = s.getServerName("test");
function countNodes(){
var x = s.config.shards.findOne({ _id: serverName });
- return x.host.split(",").length
+ return x.host.split(",").length;
}
assert.eq(2, countNodes(), "A1");
@@ -68,12 +68,12 @@ for (var i = 0; i < 5; i++) {
}
}
-jsTest.log("Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...")
-rs.test.awaitReplication()
+jsTest.log("Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
+rs.test.awaitReplication();
// Make sure we wait for secondaries here - otherwise a secondary could come online later and be used for the
// count command before being fully replicated
-jsTest.log("Awaiting secondary status of all nodes")
-rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000)
+jsTest.log("Awaiting secondary status of all nodes");
+rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
// -------------------------------------------------------------------------------------------
// ---------- test routing to slaves ----------------
@@ -82,7 +82,7 @@ rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180
// --- not sharded ----
var m = new Mongo(s.s.name);
-var ts = m.getDB("test").foo
+var ts = m.getDB("test").foo;
var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
@@ -105,7 +105,7 @@ assert.lte(before.query + 10, after.query, "B3");
// --- add more data ----
-db.foo.ensureIndex({ x: 1 })
+db.foo.ensureIndex({ x: 1 });
var bulk = db.foo.initializeUnorderedBulkOp();
for (var i = 0; i < 100; i++) {
@@ -153,55 +153,55 @@ assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
// by non-shard key
m = new Mongo(s.s.name);
-ts = m.getDB("test").foo
+ts = m.getDB("test").foo;
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters
+before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
for (var i = 0; i < 10; i++) {
assert.eq(17, ts.findOne({ _id: 5 }).x, "D1");
}
-m.setSlaveOk()
+m.setSlaveOk();
for (var i = 0; i < 10; i++) {
assert.eq(17, ts.findOne({ _id: 5 }).x, "D2");
}
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters
+after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-assert.lte(before.query + 10, after.query, "D3")
+assert.lte(before.query + 10, after.query, "D3");
// by shard key
m = new Mongo(s.s.name);
m.forceWriteMode("commands");
-db.printShardingStatus()
+db.printShardingStatus();
-ts = m.getDB("test").foo
+ts = m.getDB("test").foo;
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters
+before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
for (var i = 0; i < 10; i++) {
assert.eq(57, ts.findOne({ x: 57 }).x, "E1");
}
-m.setSlaveOk()
+m.setSlaveOk();
for (var i = 0; i < 10; i++) {
assert.eq(57, ts.findOne({ x: 57 }).x, "E2");
}
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters
+after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-assert.lte(before.query + 10, after.query, "E3")
+assert.lte(before.query + 10, after.query, "E3");
-assert.eq(100, ts.count(), "E4")
-assert.eq(100, ts.find().itcount(), "E5")
-printjson(ts.find().batchSize(5).explain())
+assert.eq(100, ts.count(), "E4");
+assert.eq(100, ts.find().itcount(), "E5");
+printjson(ts.find().batchSize(5).explain());
// fsyncLock the secondaries
rs.test.getSecondaries().forEach(function(secondary) {
assert.commandWorked(secondary.getDB("test").fsyncLock());
-})
+});
// Modify data only on the primary replica of the primary shard.
// { x: 60 } goes to the shard of "rs", which is the primary shard.
assert.writeOK(ts.insert({ primaryOnly: true, x: 60 }));
@@ -211,7 +211,7 @@ assert.eq(0, ts.find({ primaryOnly: true, x: 60 }).itcount());
// Unlock the secondaries
rs.test.getSecondaries().forEach(function(secondary) {
secondary.getDB("test").fsyncUnlock();
-})
+});
// Clean up the data
assert.writeOK(ts.remove({ primaryOnly: true, x: 60 }, { writeConcern: { w: 3 }}));
diff --git a/jstests/sharding/sharding_system_namespaces.js b/jstests/sharding/sharding_system_namespaces.js
index b27c3635c5d..06129357f92 100644
--- a/jstests/sharding/sharding_system_namespaces.js
+++ b/jstests/sharding/sharding_system_namespaces.js
@@ -67,5 +67,5 @@ if (Array.contains(storageEngines, "wiredTiger")) {
checkCollectionOptions(anotherShard.getDB("test"));
}
else {
- print("Skipping test. wiredTiger engine not supported by mongod binary.")
+ print("Skipping test. wiredTiger engine not supported by mongod binary.");
}
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 2d32a539c35..08ef71cfa93 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -10,18 +10,18 @@ s.adminCommand( { shardcollection : "test.data" , key : { 'sub.num' : 1 } } );
db = s.getDB( "test" );
-N = 100
+N = 100;
-forward = []
-backward = []
+forward = [];
+backward = [];
for ( i=0; i<N; i++ ){
- db.data.insert( { _id : i , sub: {num : i , x : N - i }} )
- forward.push( i )
- backward.push( ( N - 1 ) - i )
+ db.data.insert( { _id : i , sub: {num : i , x : N - i }} );
+ forward.push( i );
+ backward.push( ( N - 1 ) - i );
}
-s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } )
-s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } )
+s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } );
+s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } );
s.adminCommand({ movechunk : "test.data",
find : { 'sub.num' : 50 },
@@ -31,7 +31,7 @@ s.adminCommand({ movechunk : "test.data",
assert.lte( 3 , s.config.chunks.find().itcount() , "A1" );
temp = s.config.chunks.find().sort( { min : 1 } ).toArray();
-temp.forEach( printjsononeline )
+temp.forEach( printjsononeline );
z = 0;
for ( ; z<temp.length; z++ )
@@ -44,7 +44,7 @@ assert.neq( temp[z-1].shard , temp[z].shard , "A3" );
temp = db.data.find().sort( { 'sub.num' : 1 } ).toArray();
assert.eq( N , temp.length , "B1" );
for ( i=0; i<100; i++ ){
- assert.eq( i , temp[i].sub.num , "B2" )
+ assert.eq( i , temp[i].sub.num , "B2" );
}
@@ -52,19 +52,19 @@ db.data.find().sort( { 'sub.num' : 1 } ).toArray();
s.getServer("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray();
a = Date.timeFunc( function(){ z = db.data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
-assert.eq( 100 , z.length , "C1" )
+assert.eq( 100 , z.length , "C1" );
b = 1.5 * Date.timeFunc( function(){ z = s.getServer("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
-assert.eq( 67 , z.length , "C2" )
+assert.eq( 67 , z.length , "C2" );
-print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" )
+print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" );
// -- secondary index sorting
function getSorted( by , dir , proj ){
- var s = {}
+ var s = {};
s[by] = dir || 1;
- printjson( s )
- var cur = db.data.find( {} , proj || {} ).sort( s )
+ printjson( s );
+ var cur = db.data.find( {} , proj || {} ).sort( s );
return terse( cur.map( function(z){ return z.sub.num; } ) );
}
@@ -81,23 +81,23 @@ function terse( a ){
forward = terse(forward);
backward = terse(backward);
-assert.eq( forward , getSorted( "sub.num" , 1 ) , "D1" )
-assert.eq( backward , getSorted( "sub.num" , -1 ) , "D2" )
+assert.eq( forward , getSorted( "sub.num" , 1 ) , "D1" );
+assert.eq( backward , getSorted( "sub.num" , -1 ) , "D2" );
-assert.eq( backward , getSorted( "sub.x" , 1 ) , "D3" )
-assert.eq( forward , getSorted( "sub.x" , -1 ) , "D4" )
+assert.eq( backward , getSorted( "sub.x" , 1 ) , "D3" );
+assert.eq( forward , getSorted( "sub.x" , -1 ) , "D4" );
-assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub.num' : 1 } ) , "D5" )
-assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub.num' : 1 } ) , "D6" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub.num' : 1 } ) , "D5" );
+assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub.num' : 1 } ) , "D6" );
-assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub' : 1 } ) , "D7" )
-assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub' : 1 } ) , "D8" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub' : 1 } ) , "D7" );
+assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub' : 1 } ) , "D8" );
-assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0 } ) , "D9" )
-assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0 } ) , "D10" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0 } ) , "D9" );
+assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0 } ) , "D10" );
-assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D11" )
-assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" )
+assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D11" );
+assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" );
s.stop();
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index 81453497bf6..21680f1abee 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
-jsTest.log( "Starting sharded cluster..." )
+jsTest.log( "Starting sharded cluster..." );
-var st = new ShardingTest( { shards : 1, mongos : 2, verbose : 2 } )
+var st = new ShardingTest( { shards : 1, mongos : 2, verbose : 2 } );
-st.stopBalancer()
+st.stopBalancer();
-var mongosA = st.s0
-var mongosB = st.s1
+var mongosA = st.s0;
+var mongosB = st.s1;
-jsTest.log( "Adding new collections...")
+jsTest.log( "Adding new collections...");
-var collA = mongosA.getCollection( jsTestName() + ".coll" )
+var collA = mongosA.getCollection( jsTestName() + ".coll" );
assert.writeOK(collA.insert({ hello : "world" }));
-var collB = mongosB.getCollection( "" + collA )
+var collB = mongosB.getCollection( "" + collA );
assert.writeOK(collB.insert({ hello : "world" }));
-jsTest.log( "Enabling sharding..." )
+jsTest.log( "Enabling sharding..." );
-printjson( mongosA.getDB( "admin" ).runCommand({ enableSharding : "" + collA.getDB() }) )
-printjson( mongosA.getDB( "admin" ).runCommand({ shardCollection : "" + collA, key : { _id : 1 } }) )
+printjson( mongosA.getDB( "admin" ).runCommand({ enableSharding : "" + collA.getDB() }) );
+printjson( mongosA.getDB( "admin" ).runCommand({ shardCollection : "" + collA, key : { _id : 1 } }) );
// MongoD doesn't know about the config shard version *until* MongoS tells it
-collA.findOne()
+collA.findOne();
jsTest.log( "Trigger shard version mismatch..." );
assert.writeOK(collB.insert({ goodbye : "world" }));
-print( "Inserted..." )
+print( "Inserted..." );
-assert.eq( 3, collA.find().itcount() )
-assert.eq( 3, collB.find().itcount() )
+assert.eq( 3, collA.find().itcount() );
+assert.eq( 3, collB.find().itcount() );
-st.stop()
+st.stop();
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index 1d1ed336997..f88f128a75a 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -8,7 +8,7 @@
(function() {
"use strict";
-var st = new ShardingTest({shards: 2})
+var st = new ShardingTest({shards: 2});
jsTestLog("Setting up initial data");
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index a9696639b7a..ba7221cbe47 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -30,7 +30,7 @@ s.adminCommand( { shardcollection : "test.zzz" , key : { _id : 1 } } );
N = 10000;
-s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } )
+s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } );
s.adminCommand({ moveChunk: "test.foo", find: { _id: 3 },
to: s.getNonPrimaries("test")[0], _waitForDelete: true });
@@ -40,14 +40,14 @@ for ( i=0; i<N; i++ )
assert.writeOK(bulk.execute());
x = db.foo.stats();
-assert.eq( N , x.count , "coll total count expected" )
-assert.eq( db.foo.count() , x.count , "coll total count match" )
-assert.eq( 2 , x.nchunks , "coll chunk num" )
-assert.eq( 2 , numKeys(x.shards) , "coll shard num" )
-assert.eq( N / 2 , x.shards.shard0000.count , "coll count on shard0000 expected" )
-assert.eq( N / 2 , x.shards.shard0001.count , "coll count on shard0001 expected" )
-assert.eq( a.foo.count() , x.shards.shard0000.count , "coll count on shard0000 match" )
-assert.eq( b.foo.count() , x.shards.shard0001.count , "coll count on shard0001 match" )
+assert.eq( N , x.count , "coll total count expected" );
+assert.eq( db.foo.count() , x.count , "coll total count match" );
+assert.eq( 2 , x.nchunks , "coll chunk num" );
+assert.eq( 2 , numKeys(x.shards) , "coll shard num" );
+assert.eq( N / 2 , x.shards.shard0000.count , "coll count on shard0000 expected" );
+assert.eq( N / 2 , x.shards.shard0001.count , "coll count on shard0001 expected" );
+assert.eq( a.foo.count() , x.shards.shard0000.count , "coll count on shard0000 match" );
+assert.eq( b.foo.count() , x.shards.shard0001.count , "coll count on shard0001 match" );
assert(!x.shards.shard0000.indexDetails,
'indexDetails should not be present in shard0000: ' + tojson(x.shards.shard0000));
assert(!x.shards.shard0001.indexDetails,
@@ -64,12 +64,12 @@ x = db.stats();
//dbstats uses Future::CommandResult so raw output uses connection strings not shard names
shards = Object.keySet(x.raw);
-assert.eq( N + (a_extras + b_extras) , x.objects , "db total count expected" )
-assert.eq( 2 , numKeys(x.raw) , "db shard num" )
-assert.eq( (N / 2) + a_extras, x.raw[shards[0]].objects , "db count on shard0000 expected" )
-assert.eq( (N / 2) + b_extras, x.raw[shards[1]].objects , "db count on shard0001 expected" )
-assert.eq( a.stats().objects , x.raw[shards[0]].objects , "db count on shard0000 match" )
-assert.eq( b.stats().objects , x.raw[shards[1]].objects , "db count on shard0001 match" )
+assert.eq( N + (a_extras + b_extras) , x.objects , "db total count expected" );
+assert.eq( 2 , numKeys(x.raw) , "db shard num" );
+assert.eq( (N / 2) + a_extras, x.raw[shards[0]].objects , "db count on shard0000 expected" );
+assert.eq( (N / 2) + b_extras, x.raw[shards[1]].objects , "db count on shard0001 expected" );
+assert.eq( a.stats().objects , x.raw[shards[0]].objects , "db count on shard0000 match" );
+assert.eq( b.stats().objects , x.raw[shards[1]].objects , "db count on shard0001 match" );
/* Test db.stat() and db.collection.stat() scaling */
diff --git a/jstests/sharding/sync_cluster_config/auth_config_down.js b/jstests/sharding/sync_cluster_config/auth_config_down.js
index 02f6679ed51..3f51aa8ada7 100644
--- a/jstests/sharding/sync_cluster_config/auth_config_down.js
+++ b/jstests/sharding/sync_cluster_config/auth_config_down.js
@@ -9,31 +9,31 @@ var st = new ShardingTest({ shards : 1,
mongos : 1,
verbose : 1,
keyFile : "jstests/libs/key1",
- other : { sync : true } })
+ other : { sync : true } });
-var mongos = st.s0
-var configs = st._configServers
+var mongos = st.s0;
+var configs = st._configServers;
-printjson( configs )
+printjson( configs );
mongos.getDB("admin").createUser({user: "root", pwd: "pass", roles: ["root"]});
mongos.getDB("admin").auth("root", "pass");
assert.writeOK(mongos.getCollection( "foo.bar" ).insert({ hello : "world" }));
-var stopOrder = [ 1, 0 ]
+var stopOrder = [ 1, 0 ];
for( var i = 0; i < stopOrder.length; i++ ){
- var configToStop = configs[ stopOrder[i] ]
+ var configToStop = configs[ stopOrder[i] ];
- jsTest.log( "Stopping config server " + stopOrder[i] + " : " + configToStop )
+ jsTest.log( "Stopping config server " + stopOrder[i] + " : " + configToStop );
- MongoRunner.stopMongod( configToStop )
+ MongoRunner.stopMongod( configToStop );
- jsTest.log( "Starting mongos with auth..." )
+ jsTest.log( "Starting mongos with auth..." );
var mongosWithAuth = MongoRunner.runMongos({ keyFile : "jstests/libs/key1",
- configdb : mongos.savedOptions.configdb })
+ configdb : mongos.savedOptions.configdb });
var foodb = mongosWithAuth.getDB('foo');
mongosWithAuth.getDB("admin").auth("root", "pass");
var res = foodb.bar.findOne();
@@ -60,7 +60,7 @@ for (var i = 0; i < configs.length; i++) {
assert.eq(0, configs[i].getDB('foo').getUsers().length);
}
-jsTest.log( "DONE!" )
+jsTest.log( "DONE!" );
-st.stop()
+st.stop();
diff --git a/jstests/sharding/sync_cluster_config/dbhash_cache.js b/jstests/sharding/sync_cluster_config/dbhash_cache.js
index 7cffa24963c..d61066df47a 100644
--- a/jstests/sharding/sync_cluster_config/dbhash_cache.js
+++ b/jstests/sharding/sync_cluster_config/dbhash_cache.js
@@ -8,14 +8,14 @@ st.stopBalancer();
var mongos = st.s0;
var shards = mongos.getCollection( "config.shards" ).find().toArray();
var admin = mongos.getDB( "admin" );
-var configs = st._configServers
+var configs = st._configServers;
assert(admin.runCommand({ enablesharding : "test" }).ok);
printjson(admin.runCommand({ movePrimary : "test", to : shards[0]._id }));
assert(admin.runCommand({ shardcollection : "test.foo" , key : { x : 1 } }).ok);
mongos.getCollection("test.foo").insert({x:1});
-assert.eq(1, st.config.chunks.count(), "there should only be 1 chunk")
+assert.eq(1, st.config.chunks.count(), "there should only be 1 chunk");
var dbhash1 = configs[0].getDB("config").runCommand( "dbhash");
printjson("dbhash before split and move is " + dbhash1.collections.chunks);
@@ -28,11 +28,11 @@ assert( admin.runCommand({ moveChunk : "test.foo",
_waitForDelete : true }).ok );
st.printShardingStatus();
-assert.eq(2, st.config.chunks.count(), "there should be 2 chunks")
+assert.eq(2, st.config.chunks.count(), "there should be 2 chunks");
var dbhash2 = configs[0].getDB("config").runCommand("dbhash");
printjson("dbhash after split and move is " + dbhash2.collections.chunks);
-assert.neq(dbhash1.collections.chunks, dbhash2.collections.chunks, "The hash should be different after split and move." )
+assert.neq(dbhash1.collections.chunks, dbhash2.collections.chunks, "The hash should be different after split and move." );
st.stop();
diff --git a/jstests/sharding/sync_cluster_config/empty_cluster_init.js b/jstests/sharding/sync_cluster_config/empty_cluster_init.js
index 00200d711bc..87a70463107 100644
--- a/jstests/sharding/sync_cluster_config/empty_cluster_init.js
+++ b/jstests/sharding/sync_cluster_config/empty_cluster_init.js
@@ -4,7 +4,7 @@
// version initialization.
//
-jsTest.log("Start config servers...")
+jsTest.log("Start config servers...");
var configSvrA = MongoRunner.runMongod({ configsvr: "", journal: "", verbose : 2 });
var configSvrB = MongoRunner.runMongod({ configsvr: "", journal: "", verbose : 2 });
diff --git a/jstests/sharding/sync_cluster_config/sync2.js b/jstests/sharding/sync_cluster_config/sync2.js
index fa9c77b47fb..c3d2f4cc0fa 100644
--- a/jstests/sharding/sync_cluster_config/sync2.js
+++ b/jstests/sharding/sync_cluster_config/sync2.js
@@ -68,7 +68,7 @@ assert.eq( 50 , s.config.big.find().itcount() , "C2" );
assert.eq( 50 , s.config.big.find().count() , "C3" );
assert.eq( 50 , s.config.big.find().batchSize(5).itcount() , "C4" );
-var hashes = []
+var hashes = [];
for ( i=0; i<3; i++ ){
print( i );
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 8d8e4a35ef7..c138fc788fa 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -8,16 +8,16 @@ var s = new ShardingTest({ name: "tag_auto_split",
db = s.getDB( "test" );
-s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
assert.eq( 1, s.config.chunks.count() );
-sh.addShardTag( "shard0000" , "a" )
+sh.addShardTag( "shard0000" , "a" );
-sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" )
-sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" )
+sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" );
+sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" );
assert.soon( function() {
//printjson( sh.status() );
@@ -36,16 +36,16 @@ s = new ShardingTest({ name: "tag_auto_split2",
db = s.getDB( "test" );
-s.adminCommand( { enablesharding : "test" } )
+s.adminCommand( { enablesharding : "test" } );
s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1, a : 1 } } );
assert.eq( 1, s.config.chunks.count() );
-sh.addShardTag( "shard0000" , "a" )
+sh.addShardTag( "shard0000" , "a" );
-sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" )
-sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" )
+sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" );
+sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" );
assert.soon( function() {
//printjson( sh.status() );
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index ee217b5bce7..fc4a71933ed 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -22,7 +22,7 @@ for (var i = 0; i < numChunks; i++) {
assert.commandWorked(mongos.adminCommand({ split : coll + "", middle : { _id : i } }));
}
-jsTest.log("Inserting a lot of small documents...")
+jsTest.log("Inserting a lot of small documents...");
// Insert a lot of small documents to make multiple cursor batches
var bulk = coll.initializeUnorderedBulkOp();
@@ -37,14 +37,14 @@ jsTest.log("Opening a mongod cursor...");
var cursor = coll.find();
var next = cursor.next();
-jsTest.log("Moving a bunch of chunks to stack cleanup...")
+jsTest.log("Moving a bunch of chunks to stack cleanup...");
// Move a bunch of chunks, but don't close the cursor so they stack.
for (var i = 0; i < numChunks; i++) {
- printjson(mongos.adminCommand({ moveChunk : coll + "", find : { _id : i }, to : shards[1]._id }))
+ printjson(mongos.adminCommand({ moveChunk : coll + "", find : { _id : i }, to : shards[1]._id }));
}
-jsTest.log("Dropping and re-creating collection...")
+jsTest.log("Dropping and re-creating collection...");
coll.drop();
@@ -56,10 +56,10 @@ assert.writeOK(bulk.execute());
sleep(10 * 1000);
-jsTest.log("Checking that documents were not cleaned up...")
+jsTest.log("Checking that documents were not cleaned up...");
for (var i = 0; i < numChunks; i++) {
- assert.neq(null, coll.findOne({ _id : i }))
+ assert.neq(null, coll.findOne({ _id : i }));
}
st.stop();
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index b4fa6a60398..e54cb21f3c7 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -17,7 +17,7 @@ assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
st.ensurePrimaryShard('test', 'shard0000');
assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.foo', key: { x: 1 }}));
-var inserts = []
+var inserts = [];
for (var i = 0; i < 100; i++) {
inserts.push({x:i});
}
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index e90ecb7e037..7f82d84600b 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -1,7 +1,7 @@
// Tests that updates can't change immutable fields (used in sharded system)
var st = new ShardingTest({shards : 2,
mongos : 1,
- verbose : 0})
+ verbose : 0});
st.stopBalancer();
var mongos = st.s;
@@ -9,9 +9,9 @@ var config = mongos.getDB("config");
var coll = mongos.getCollection(jsTestName() + ".coll1");
var shard0 = st.shard0;
-printjson(config.adminCommand({enableSharding : coll.getDB() + ""}))
+printjson(config.adminCommand({enableSharding : coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000');
-printjson(config.adminCommand({shardCollection : "" + coll, key : {a : 1}}))
+printjson(config.adminCommand({shardCollection : "" + coll, key : {a : 1}}));
var getDirectShardedConn = function( st, collName ) {
@@ -34,23 +34,23 @@ var getDirectShardedConn = function( st, collName ) {
assert.commandWorked( shardConnWithVersion.getDB( "admin" ).runCommand( ssvInitCmd ) );
return shardConnWithVersion;
-}
+};
var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
// No shard key
-shard0Coll.remove({})
+shard0Coll.remove({});
assert.writeError(shard0Coll.save({ _id: 3 }));
// Full shard key in save
assert.writeOK(shard0Coll.save({ _id: 1, a: 1 }));
// Full shard key on replacement (basically the same as above)
-shard0Coll.remove({})
+shard0Coll.remove({});
assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }, true));
// Full shard key after $set
-shard0Coll.remove({})
+shard0Coll.remove({});
assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}, true));
// Update existing doc (replacement), same shard key value
@@ -66,7 +66,7 @@ assert.writeError(shard0Coll.update({ _id: 1 }, { b: 1 }));
assert.writeError(shard0Coll.update({ _id: 1 }, { $unset: { a: 1 }}));
// Error due to removing all the embedded fields.
-shard0Coll.remove({})
+shard0Coll.remove({});
assert.writeOK(shard0Coll.save({ _id: 2, a: { c: 1, b: 1 }}));
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index 805cda0c487..e76521f2377 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -11,7 +11,7 @@ s.ensurePrimaryShard('test', 'shard0001');
s.adminCommand( { shardcollection : "test.update0" , key : { key : 1 } } );
s.adminCommand( { shardcollection : "test.update1" , key : { key : "hashed" } } );
-db = s.getDB( "test" )
+db = s.getDB( "test" );
for(i=0; i < 2; i++){
coll = db.getCollection("update" + i);
@@ -22,9 +22,9 @@ for(i=0; i < 2; i++){
coll.save({_id:2, key:2});
coll.update({_id:3, key:3}, {$set: {foo: 'bar'}}, {upsert: true});
- assert.eq(coll.count(), 3, "count A")
- assert.eq(coll.findOne({_id:3}).key, 3 , "findOne 3 key A")
- assert.eq(coll.findOne({_id:3}).foo, 'bar' , "findOne 3 foo A")
+ assert.eq(coll.count(), 3, "count A");
+ assert.eq(coll.findOne({_id:3}).key, 3 , "findOne 3 key A");
+ assert.eq(coll.findOne({_id:3}).foo, 'bar' , "findOne 3 foo A");
// update existing using save()
coll.save({_id:1, key:1, other:1});
@@ -37,9 +37,9 @@ for(i=0; i < 2; i++){
coll.save( {_id:4, key:4} );
coll.update({key:4}, {key:4, other:4});
assert.eq( coll.find({key:4, other:4}).count() , 1 , 'replacement update error');
- coll.remove( {_id:4} )
+ coll.remove( {_id:4} );
- assert.eq(coll.count(), 3, "count B")
+ assert.eq(coll.count(), 3, "count B");
coll.find().forEach(function(x){
assert.eq(x._id, x.key, "_id == key");
assert.eq(x._id, x.other, "_id == other");
@@ -51,10 +51,10 @@ for(i=0; i < 2; i++){
assert.writeOK(coll.update({ _id: 1, key: 1 }, { $set: { foo: 2 }}));
coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
- assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" )
+ assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" );
coll.update( { key : 18 } , { $inc : { x : 5 } } , true , true );
- assert.eq( 5 , coll.findOne( { key : 18 } ).x , "up2" )
+ assert.eq( 5 , coll.findOne( { key : 18 } ).x , "up2" );
// Make sure we can extract exact _id from certain queries
assert.writeOK(coll.update({_id : ObjectId()}, {$set : {x : 1}}, {multi : false}));
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index f2a8d626492..1ceadd1b2fd 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -17,7 +17,7 @@ if (jsTest.options().storageEngine === "mmapv1") {
db1.createCollection( coll );
// Then verify the new collection has userFlags set to 0
- var collstats = db1.getCollection( coll ).stats()
+ var collstats = db1.getCollection( coll ).stats();
print( "*************** Fresh Collection Stats ************" );
printjson( collstats );
assert.eq( collstats.userFlags , 1 , "fresh collection doesn't have userFlags = 1 ");
@@ -33,7 +33,7 @@ if (jsTest.options().storageEngine === "mmapv1") {
}
// Next verify that userFlags has changed to 0
- collstats = db1.getCollection( coll ).stats()
+ collstats = db1.getCollection( coll ).stats();
print( "*************** Collection Stats After CollMod ************" );
printjson( collstats );
assert.eq( collstats.userFlags , 0 , "modified collection should have userFlags = 0 ");
@@ -52,7 +52,7 @@ if (jsTest.options().storageEngine === "mmapv1") {
print( "*************** Collection Stats On Other Shard ************" );
var shard2 = s._connections[0].getDB( dbname );
- shard2stats = shard2.getCollection( coll ).stats()
+ shard2stats = shard2.getCollection( coll ).stats();
printjson( shard2stats );
assert.eq( shard2stats.count , numdocs , "moveChunk didn't succeed" );
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index d10e81e2655..d9b771b3d51 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -80,17 +80,17 @@ function runTest(s) {
s.printChangeLog();
function map() { emit('count', 1); }
- function reduce(key, values) { return Array.sum(values) }
+ function reduce(key, values) { return Array.sum(values); }
jsTest.log("Test basic mapreduce...");
// Test basic mapReduce
for (var iter = 0; iter < 5; iter++) {
print("Test #" + iter);
- out = db.foo.mapReduce(map, reduce,"big_out")
+ out = db.foo.mapReduce(map, reduce,"big_out");
}
- print("Testing output to different db...")
+ print("Testing output to different db...");
// test output to a different DB
// do it multiple times so that primary shard changes
@@ -102,9 +102,9 @@ function runTest(s) {
outCollStr = "mr_replace_col_" + iter;
outDbStr = "mr_db_" + iter;
- print("Testing mr replace into DB " + iter)
+ print("Testing mr replace into DB " + iter);
- res = db.foo.mapReduce(map , reduce , { out: { replace: outCollStr, db: outDbStr } })
+ res = db.foo.mapReduce(map , reduce , { out: { replace: outCollStr, db: outDbStr } });
printjson(res);
outDb = s.getDB(outDbStr);
@@ -119,7 +119,7 @@ function runTest(s) {
assert.eq(res.result.db, outDbStr, "Wrong db " + res.result.db);
}
- jsTest.log("Verifying nonatomic M/R throws...")
+ jsTest.log("Verifying nonatomic M/R throws...");
// check nonAtomic output
assert.throws(function() {
@@ -142,8 +142,8 @@ function runTest(s) {
jsTest.log("No errors...");
- map2 = function() { emit(this.val, 1); }
- reduce2 = function(key, values) { return Array.sum(values); }
+ map2 = function() { emit(this.val, 1); };
+ reduce2 = function(key, values) { return Array.sum(values); };
// Test merge
outcol = "big_out_merge";