author    Mark Benvenuto <mark.benvenuto@mongodb.com>  2016-05-28 17:55:12 -0400
committer Mark Benvenuto <mark.benvenuto@mongodb.com>  2016-05-28 17:55:12 -0400
commit    6dcdd23dd37ef12c87e71cf59ef01cd82432efe0 (patch)
tree      c8cfb5acb62c80f375bc37e7d4350382deea6a37 /jstests/sharding
parent    d4ac5673ea3f6cef4ce9dbcec90e31813997a528 (diff)
download  mongo-6dcdd23dd37ef12c87e71cf59ef01cd82432efe0.tar.gz
SERVER-23971 Clang-Format code
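The bulk of this commit is mechanical reflowing by clang-format: object literals that fit within the column limit are collapsed onto one line, and long call chains are rewrapped. A minimal JavaScript sketch of the first pattern, illustrative only (the variable is taken from auth.js below, but the snippet is not itself part of the diff):

// Before formatting: a short object literal spread over several lines.
var testUser = {
    db: "test",
    username: "bar",
    password: "baz"
};

// After clang-format: collapsed onto one line because it fits the column limit.
var testUser = {db: "test", username: "bar", password: "baz"};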
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/SERVER-7379.js | 6
-rw-r--r--  jstests/sharding/addshard2.js | 31
-rw-r--r--  jstests/sharding/auth.js | 81
-rw-r--r--  jstests/sharding/authCommands.js | 30
-rw-r--r--  jstests/sharding/auth_add_shard.js | 14
-rw-r--r--  jstests/sharding/auth_no_config_primary.js | 4
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js | 4
-rw-r--r--  jstests/sharding/authmr.js | 8
-rw-r--r--  jstests/sharding/authwhere.js | 8
-rw-r--r--  jstests/sharding/auto2.js | 21
-rw-r--r--  jstests/sharding/balance_repl.js | 10
-rw-r--r--  jstests/sharding/batch_write_command_sharded.js | 5
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js | 6
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_prereload.js | 21
-rw-r--r--  jstests/sharding/coll_epoch_test0.js | 25
-rw-r--r--  jstests/sharding/commands_that_write_accept_wc_configRS.js | 9
-rw-r--r--  jstests/sharding/commands_that_write_accept_wc_shards.js | 9
-rw-r--r--  jstests/sharding/conf_server_write_concern.js | 8
-rw-r--r--  jstests/sharding/covered_shard_key_indexes.js | 21
-rw-r--r--  jstests/sharding/delete_during_migrate.js | 4
-rw-r--r--  jstests/sharding/drop_sharded_db.js | 4
-rw-r--r--  jstests/sharding/empty_doc_results.js | 5
-rw-r--r--  jstests/sharding/explain_cmd.js | 6
-rw-r--r--  jstests/sharding/explain_find_and_modify_sharded.js | 4
-rw-r--r--  jstests/sharding/explain_read_pref.js | 46
-rw-r--r--  jstests/sharding/fair_balancer_round.js | 8
-rw-r--r--  jstests/sharding/features1.js | 23
-rw-r--r--  jstests/sharding/features2.js | 11
-rw-r--r--  jstests/sharding/fts_score_sort_sharded.js | 5
-rw-r--r--  jstests/sharding/geo_near_random1.js | 4
-rw-r--r--  jstests/sharding/geo_near_random2.js | 6
-rw-r--r--  jstests/sharding/geo_shardedgeonear.js | 7
-rw-r--r--  jstests/sharding/group_slaveok.js | 13
-rw-r--r--  jstests/sharding/hash_shard1.js | 4
-rw-r--r--  jstests/sharding/index1.js | 16
-rw-r--r--  jstests/sharding/key_many.js | 44
-rw-r--r--  jstests/sharding/key_string.js | 9
-rw-r--r--  jstests/sharding/lagged_config_secondary.js | 4
-rw-r--r--  jstests/sharding/limit_push.js | 4
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 8
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js | 42
-rw-r--r--  jstests/sharding/migrateBig.js | 7
-rw-r--r--  jstests/sharding/migration_failure.js | 4
-rw-r--r--  jstests/sharding/min_optime_recovery.js | 10
-rw-r--r--  jstests/sharding/mongos_no_replica_set_refresh.js | 6
-rw-r--r--  jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js | 10
-rw-r--r--  jstests/sharding/mongos_rs_shard_failure_tolerance.js | 4
-rw-r--r--  jstests/sharding/mongos_shard_failure_tolerance.js | 4
-rw-r--r--  jstests/sharding/move_chunk_basic.js | 8
-rw-r--r--  jstests/sharding/move_chunk_wc.js | 25
-rw-r--r--  jstests/sharding/move_stale_mongos.js | 8
-rw-r--r--  jstests/sharding/movechunk_with_default_paranoia.js | 8
-rw-r--r--  jstests/sharding/movechunk_with_moveParanoia.js | 8
-rw-r--r--  jstests/sharding/movechunk_with_noMoveParanoia.js | 8
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 22
-rw-r--r--  jstests/sharding/mr_shard_version.js | 6
-rw-r--r--  jstests/sharding/multi_mongos2.js | 6
-rw-r--r--  jstests/sharding/no_empty_reset.js | 8
-rw-r--r--  jstests/sharding/pending_chunk.js | 12
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 34
-rw-r--r--  jstests/sharding/printShardingStatus.js | 5
-rw-r--r--  jstests/sharding/query_config.js | 52
-rw-r--r--  jstests/sharding/read_pref.js | 8
-rw-r--r--  jstests/sharding/read_pref_cmd.js | 68
-rw-r--r--  jstests/sharding/regex_targeting.js | 15
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js | 4
-rw-r--r--  jstests/sharding/secondary_query_routing.js | 4
-rw-r--r--  jstests/sharding/shard2.js | 18
-rw-r--r--  jstests/sharding/shard_aware_init.js | 11
-rw-r--r--  jstests/sharding/shard_aware_primary_failover.js | 4
-rw-r--r--  jstests/sharding/shard_identity_config_update.js | 4
-rw-r--r--  jstests/sharding/sharding_balance1.js | 16
-rw-r--r--  jstests/sharding/sharding_balance4.js | 8
-rw-r--r--  jstests/sharding/sharding_options.js | 8
-rw-r--r--  jstests/sharding/sharding_rs2.js | 10
-rw-r--r--  jstests/sharding/sharding_state_after_stepdown.js | 70
-rw-r--r--  jstests/sharding/sort1.js | 7
-rw-r--r--  jstests/sharding/split_large_key.js | 5
-rw-r--r--  jstests/sharding/split_with_force.js | 16
-rw-r--r--  jstests/sharding/stale_mongos_updates_and_removes.js | 41
-rw-r--r--  jstests/sharding/stats.js | 4
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 54
82 files changed, 490 insertions, 738 deletions
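The other recurring change, visible in files such as addshard2.js and auth.js below, is how long call chains wrap: when a chained call no longer fits on one line, clang-format breaks before the `.` and indents the continuation under the receiver. A hedged JavaScript sketch of that pattern (the host variable is hypothetical; the shape mirrors the addshard2.js hunk):

// Before: the argument object forces an awkward split inside the chain.
assert(s.admin.runCommand({
    addshard: host,
    name: "myshard"
}).ok,
       "failed to add shard");

// After: break before .runCommand and .ok, aligning the continuation.
assert(s.admin
           .runCommand({addshard: host, name: "myshard"})
           .ok,
       "failed to add shard");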
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index bdf311cbf6e..a98161f101e 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -7,11 +7,7 @@ st.adminCommand(
var db = st.s.getDB('test');
var offerChange = db.getCollection('offerChange');
-var testDoc = {
- "_id": 123,
- "categoryId": 9881,
- "store": "NEW"
-};
+var testDoc = {"_id": 123, "categoryId": 9881, "store": "NEW"};
offerChange.remove({}, false);
offerChange.insert(testDoc);
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 7af23a4ab5b..6579d89686d 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -55,13 +55,15 @@
assert.eq("add_shard2_rs1", shard._id, "t2 name");
// step 3. replica set w/ name given
- assert(s.admin.runCommand({
- "addshard": "add_shard2_rs2/" + getHostName() + ":" + master2.port,
- "name": "myshard"
- }).ok,
+ assert(s.admin
+ .runCommand({
+ "addshard": "add_shard2_rs2/" + getHostName() + ":" + master2.port,
+ "name": "myshard"
+ })
+ .ok,
"failed to add shard in step 4");
- shard = s.getDB("config")
- .shards.findOne({"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1"]}});
+ shard =
+ s.getDB("config").shards.findOne({"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1"]}});
assert(shard, "shard wasn't found");
assert.eq("myshard", shard._id, "t3 name");
@@ -77,15 +79,18 @@
// step 5. replica set w/ a wrong host
var portWithoutHostRunning = allocatePort();
- assert(!s.admin.runCommand(
- {addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning}).ok,
- "accepted bad hostname in step 5");
+ assert(
+ !s.admin.runCommand({addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning})
+ .ok,
+ "accepted bad hostname in step 5");
// step 6. replica set w/ mixed wrong/right hosts
- assert(!s.admin.runCommand({
- addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port + ",foo:" +
- portWithoutHostRunning
- }).ok,
+ assert(!s.admin
+ .runCommand({
+ addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port + ",foo:" +
+ portWithoutHostRunning
+ })
+ .ok,
"accepted bad hostname in step 6");
// Cannot add invalid stand alone host.
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index ea3ed974cc5..f3fe71a5950 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -3,23 +3,11 @@
(function() {
'use strict';
- var adminUser = {
- db: "admin",
- username: "foo",
- password: "bar"
- };
-
- var testUser = {
- db: "test",
- username: "bar",
- password: "baz"
- };
-
- var testUserReadOnly = {
- db: "test",
- username: "sad",
- password: "bat"
- };
+ var adminUser = {db: "admin", username: "foo", password: "bar"};
+
+ var testUser = {db: "test", username: "bar", password: "baz"};
+
+ var testUserReadOnly = {db: "test", username: "sad", password: "bat"};
function login(userObj, thingToUse) {
if (!thingToUse) {
@@ -58,9 +46,11 @@
}
print("Configuration: Add user " + tojson(adminUser));
- s.getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+ s.getDB(adminUser.db).createUser({
+ user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles
+ });
login(adminUser);
// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
@@ -80,11 +70,9 @@
d1.initiate();
print("d1 initiated");
- var shardName = authutil.asCluster(d1.nodes,
- "jstests/libs/key2",
- function() {
- return getShardName(d1);
- });
+ var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key2", function() {
+ return getShardName(d1);
+ });
print("adding shard w/out auth " + shardName);
logout(adminUser);
@@ -124,15 +112,16 @@
d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- s.getDB(testUser.db)
- .createUser(
- {user: testUser.username, pwd: testUser.password, roles: jsTest.basicUserRoles});
- s.getDB(testUserReadOnly.db)
- .createUser({
- user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles
- });
+ s.getDB(testUser.db).createUser({
+ user: testUser.username,
+ pwd: testUser.password,
+ roles: jsTest.basicUserRoles
+ });
+ s.getDB(testUserReadOnly.db).createUser({
+ user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles
+ });
logout(adminUser);
@@ -162,11 +151,9 @@
d2.initiate();
d2.awaitSecondaryNodes();
- shardName = authutil.asCluster(d2.nodes,
- "jstests/libs/key1",
- function() {
- return getShardName(d2);
- });
+ shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ return getShardName(d2);
+ });
print("adding shard " + shardName);
login(adminUser);
@@ -254,16 +241,12 @@
d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
- authutil.asCluster(d1.nodes,
- "jstests/libs/key1",
- function() {
- d1.awaitReplication(120000);
- });
- authutil.asCluster(d2.nodes,
- "jstests/libs/key1",
- function() {
- d2.awaitReplication(120000);
- });
+ authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
+ d1.awaitReplication(120000);
+ });
+ authutil.asCluster(d2.nodes, "jstests/libs/key1", function() {
+ d2.awaitReplication(120000);
+ });
// add admin on shard itself, hack to prevent localhost auth bypass
d1.getPrimary()
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 03e77848974..deb6512a6b0 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -3,10 +3,7 @@
*/
var doTest = function() {
- var rsOpts = {
- oplogSize: 10,
- useHostname: false
- };
+ var rsOpts = {oplogSize: 10, useHostname: false};
var st = new ShardingTest({
keyFile: 'jstests/libs/key1',
shards: 2,
@@ -132,12 +129,10 @@ var doTest = function() {
assert.eq(100, res.results.length);
assert.eq(45, res.results[0].value);
- res = checkCommandSucceeded(
- testDB,
- {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
- });
+ res = checkCommandSucceeded(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
assert.eq(4500, res.result[0].sum);
} else {
print("Checking read operations, should fail");
@@ -148,12 +143,10 @@ var doTest = function() {
checkCommandFailed(testDB, {collstats: 'foo'});
checkCommandFailed(testDB,
{mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
- checkCommandFailed(
- testDB,
- {
- aggregate: 'foo',
- pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
- });
+ checkCommandFailed(testDB, {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
}
};
@@ -233,10 +226,7 @@ var doTest = function() {
checkCommandSucceeded(adminDB, {isdbgrid: 1});
checkCommandSucceeded(adminDB, {ismaster: 1});
checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
- chunkKey = {
- i: {$minKey: 1},
- j: {$minKey: 1}
- };
+ chunkKey = {i: {$minKey: 1}, j: {$minKey: 1}};
checkCommandFailed(
adminDB,
{moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index 4f0fec6de83..b24afd0172c 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -11,11 +11,7 @@
}
// admin user object
- var adminUser = {
- db: "admin",
- username: "foo",
- password: "bar"
- };
+ var adminUser = {db: "admin", username: "foo", password: "bar"};
// set up a 2 shard cluster with keyfile
var st = new ShardingTest(
@@ -28,9 +24,11 @@
// add the admin user
print("adding user");
- mongos.getDB(adminUser.db)
- .createUser(
- {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+ mongos.getDB(adminUser.db).createUser({
+ user: adminUser.username,
+ pwd: adminUser.password,
+ roles: jsTest.adminUserRoles
+ });
// login as admin user
login(adminUser);
diff --git a/jstests/sharding/auth_no_config_primary.js b/jstests/sharding/auth_no_config_primary.js
index a4be8806f66..3bb1ea1cf4c 100644
--- a/jstests/sharding/auth_no_config_primary.js
+++ b/jstests/sharding/auth_no_config_primary.js
@@ -33,8 +33,8 @@
assert.eq('world', res.hello);
// Test authenticate through new mongos.
- var otherMongos = MongoRunner.runMongos(
- {keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
+ var otherMongos =
+ MongoRunner.runMongos({keyFile: "jstests/libs/key1", configdb: st.s.savedOptions.configdb});
assert.commandFailedWithCode(otherMongos.getDB('test').runCommand({find: 'user'}),
ErrorCodes.Unauthorized);
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index a01314fe405..c950730c799 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -30,9 +30,7 @@ function doesRouteToSec(coll, query) {
return cmdRes.secondary;
}
-var rsOpts = {
- oplogSize: 50
-};
+var rsOpts = {oplogSize: 50};
var st = new ShardingTest(
{keyFile: 'jstests/libs/key1', shards: 1, rs: rsOpts, other: {nopreallocj: 1}});
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index 6484c729474..67f87fc59b4 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -10,12 +10,8 @@
var adminUser = {
user: "admin",
pwd: "a",
- roles: [
- "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin"
- ]
+ roles:
+ ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
};
var test1User = {
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index df27078784b..f9af413f470 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -10,12 +10,8 @@
var adminUser = {
user: "admin",
pwd: "a",
- roles: [
- "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin"
- ]
+ roles:
+ ["readWriteAnyDatabase", "dbAdminAnyDatabase", "userAdminAnyDatabase", "clusterAdmin"]
};
var test1Reader = {
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index 3d21559f8d6..3c8eb168d7b 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -16,15 +16,14 @@
var i = 0;
for (var j = 0; j < 30; j++) {
- print("j:" + j + " : " +
- Date.timeFunc(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var k = 0; k < 100; k++) {
- bulk.insert({num: i, s: bigString});
- i++;
- }
- assert.writeOK(bulk.execute());
- }));
+ print("j:" + j + " : " + Date.timeFunc(function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var k = 0; k < 100; k++) {
+ bulk.insert({num: i, s: bigString});
+ i++;
+ }
+ assert.writeOK(bulk.execute());
+ }));
}
s.startBalancer();
@@ -92,8 +91,8 @@
print("checkpoint C");
assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
- return z.shard;
- })).length == 2,
+ return z.shard;
+ })).length == 2,
"should be using both servers");
for (i = 0; i < 100; i++) {
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 433e8167829..39c28b46448 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -12,10 +12,16 @@
other: {
chunkSize: 1,
rs0: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
},
rs1: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
}
});
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 9c0dc61d21d..884d5bb85bb 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -65,10 +65,7 @@
assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
coll.remove({});
- request = {
- insert: coll.getName(),
- documents: documents
- };
+ request = {insert: coll.getName(), documents: documents};
printjson(result = coll.runCommand(request));
assert(result.ok);
assert.eq(1000, result.n);
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index 242c1f28c09..e928eaebcf2 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -31,11 +31,7 @@ load('./jstests/libs/cleanup_orphaned_util.js');
var found = false;
for (var i = 0; i < 10000; i++) {
- var doc =
- {
- key: ObjectId()
- },
- hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
+ var doc = {key: ObjectId()}, hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
print('doc.key ' + doc.key + ' hashes to ' + hash);
diff --git a/jstests/sharding/cleanup_orphaned_cmd_prereload.js b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
index 7155baea970..05fbd8b741a 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_prereload.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
@@ -18,12 +18,14 @@ jsTest.log("Moving some chunks to shard1...");
assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
assert(admin.runCommand({split: coll + "", middle: {_id: 1}}).ok);
-assert(admin.runCommand(
- {moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
- .ok);
-assert(admin.runCommand(
- {moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
- .ok);
+assert(
+ admin
+ .runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
+assert(
+ admin
+ .runCommand({moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
var metadata =
st.shard1.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
@@ -52,9 +54,10 @@ assert.eq(metadata.shardVersion.t, 0);
assert.neq(metadata.collVersion.t, 0);
assert.eq(metadata.pending.length, 0);
-assert(admin.runCommand(
- {moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
- .ok);
+assert(
+ admin
+ .runCommand({moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
+ .ok);
var metadata =
st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index 0ec0a5d3201..49fe99914a0 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -21,19 +21,18 @@ config.shards.find().forEach(function(doc) {
var createdEpoch = null;
var checkEpochs = function() {
- config.chunks.find({ns: coll + ""})
- .forEach(function(chunk) {
-
- // Make sure the epochs exist, are non-zero, and are consistent
- assert(chunk.lastmodEpoch);
- print(chunk.lastmodEpoch + "");
- assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
- if (createdEpoch == null)
- createdEpoch = chunk.lastmodEpoch;
- else
- assert.eq(createdEpoch, chunk.lastmodEpoch);
-
- });
+ config.chunks.find({ns: coll + ""}).forEach(function(chunk) {
+
+ // Make sure the epochs exist, are non-zero, and are consistent
+ assert(chunk.lastmodEpoch);
+ print(chunk.lastmodEpoch + "");
+ assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
+ if (createdEpoch == null)
+ createdEpoch = chunk.lastmodEpoch;
+ else
+ assert.eq(createdEpoch, chunk.lastmodEpoch);
+
+ });
};
checkEpochs();
diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js
index c8dd99fb06d..4ae7071da82 100644
--- a/jstests/sharding/commands_that_write_accept_wc_configRS.js
+++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js
@@ -59,8 +59,8 @@ load('jstests/multiVersion/libs/auth_helpers.js');
shardCollectionWithChunks(st, coll);
adminDB.system.version.update(
{_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
- localDB.getSiblingDB('admin')
- .system.version.update({_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
+ localDB.getSiblingDB('admin').system.version.update(
+ {_id: "authSchema"}, {"currentVersion": 3}, {upsert: true});
db.createUser({user: 'user1', pwd: 'pass', roles: jsTest.basicUserRoles});
assert(db.auth({mechanism: 'MONGODB-CR', user: 'user1', pwd: 'pass'}));
@@ -238,10 +238,7 @@ load('jstests/multiVersion/libs/auth_helpers.js');
var setupFunc = cmd.setupFunc;
var confirmFunc = cmd.confirmFunc;
- req.writeConcern = {
- w: 'majority',
- wtimeout: 25000
- };
+ req.writeConcern = {w: 'majority', wtimeout: 25000};
jsTest.log("Testing " + tojson(req));
dropTestData();
diff --git a/jstests/sharding/commands_that_write_accept_wc_shards.js b/jstests/sharding/commands_that_write_accept_wc_shards.js
index f5bd55550e2..e584b4ee264 100644
--- a/jstests/sharding/commands_that_write_accept_wc_shards.js
+++ b/jstests/sharding/commands_that_write_accept_wc_shards.js
@@ -404,10 +404,7 @@ load('jstests/libs/write_concern_util.js');
});
function testValidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'majority',
- wtimeout: 25000
- };
+ cmd.req.writeConcern = {w: 'majority', wtimeout: 25000};
jsTest.log("Testing " + tojson(cmd.req));
dropTestDatabase();
@@ -420,9 +417,7 @@ load('jstests/libs/write_concern_util.js');
}
function testInvalidWriteConcern(cmd) {
- cmd.req.writeConcern = {
- w: 'invalid'
- };
+ cmd.req.writeConcern = {w: 'invalid'};
jsTest.log("Testing " + tojson(cmd.req));
dropTestDatabase();
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/conf_server_write_concern.js
index c4e08939548..d6ca0e006e0 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -10,12 +10,12 @@ function writeToConfigTest() {
{_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 'majority'}}));
// w:1 should still work - it gets automatically upconverted to w:majority
- assert.writeOK(confDB.settings.update(
- {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 1}}));
+ assert.writeOK(
+ confDB.settings.update({_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 1}}));
// Write concerns other than w:1 and w:majority should fail.
- assert.writeError(confDB.settings.update(
- {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 2}}));
+ assert.writeError(
+ confDB.settings.update({_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 2}}));
st.stop();
}
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index 307dc241d9f..98168e7dccb 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -46,10 +46,9 @@ assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.t
assert.commandWorked(coll.dropIndexes());
assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(0,
- coll.find({a: true, b: true}, {_id: 1, a: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
+assert.eq(
+ 0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
//
//
@@ -126,20 +125,18 @@ assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
// Index without shard key query - not covered
assert.commandWorked(coll.ensureIndex({c: 1}));
assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
+assert.eq(
+ 1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
//
// Index with shard key query - nested query not covered even when projecting
assert.commandWorked(coll.dropIndex({c: 1}));
assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
-assert.eq(1,
- coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
- .explain(true)
- .executionStats.totalDocsExamined);
+assert.eq(
+ 1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1}).explain(true).executionStats.totalDocsExamined);
//
//
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index 982b0c00787..3926d180acb 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -38,8 +38,8 @@ startMongoProgramNoConnect("mongo",
dbname);
// migrate while deletions are happening
-var moveResult = s.adminCommand(
- {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name});
+var moveResult =
+ s.adminCommand({moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name});
// check if migration worked
assert(moveResult.ok, "migration didn't work while doing deletes");
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 962ff84fc40..9a0a56ca245 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -21,9 +21,7 @@
dbC.getCollection("data" + (i % numColls)).insert({_id: i});
}
- var key = {
- _id: 1
- };
+ var key = {_id: 1};
for (var i = 0; i < numColls; i++) {
st.shardColl(dbA.getCollection("data" + i), key);
st.shardColl(dbB.getCollection("data" + i), key);
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index be63f509532..2038a27c538 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -2,10 +2,7 @@
// Verifies that mongos correctly handles empty documents when all fields are projected out
//
-var options = {
- mongosOptions: {binVersion: ""},
- shardOptions: {binVersion: ""}
-};
+var options = {mongosOptions: {binVersion: ""}, shardOptions: {binVersion: ""}};
var st = new ShardingTest({shards: 2, other: options});
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index 767e26c7eb2..c638fccbced 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -50,10 +50,8 @@ assert.eq(2, explain.queryPlanner.winningPlan.shards.length);
assert.eq(2, explain.executionStats.executionStages.shards.length);
// An explain of a command that doesn't exist should fail gracefully.
-explain = db.runCommand({
- explain: {nonexistent: collSharded.getName(), query: {b: 1}},
- verbosity: "allPlansExecution"
-});
+explain = db.runCommand(
+ {explain: {nonexistent: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
printjson(explain);
assert.commandFailed(explain);
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 7c1b10321c2..62ffa2d35f8 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -12,9 +12,7 @@
st.stopBalancer();
var testDB = st.s.getDB('test');
- var shardKey = {
- a: 1
- };
+ var shardKey = {a: 1};
// Create a collection with an index on the intended shard key.
var shardedColl = testDB.getCollection(collName);
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index cdf1d1e74a4..8ac4fc4ff49 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -32,28 +32,28 @@ var testAllModes = function(conn, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [{}], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [{}], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
@@ -101,14 +101,8 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = {
- dc: 'ny',
- tag: 'one'
-};
-var SECONDARY_TAG = {
- dc: 'ny',
- tag: 'two'
-};
+var PRIMARY_TAG = {dc: 'ny', tag: 'one'};
+var SECONDARY_TAG = {dc: 'ny', tag: 'two'};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
diff --git a/jstests/sharding/fair_balancer_round.js b/jstests/sharding/fair_balancer_round.js
index 90fc345c8cb..6b477efac78 100644
--- a/jstests/sharding/fair_balancer_round.js
+++ b/jstests/sharding/fair_balancer_round.js
@@ -2,9 +2,7 @@
// Tests that a balancer round loads newly sharded collection data
//
-var options = {
- mongosOptions: {verbose: 1}
-};
+var options = {mongosOptions: {verbose: 1}};
var st = new ShardingTest({shards: 2, mongos: 2, other: options});
@@ -30,8 +28,8 @@ for (var i = 0; i < numSplits; i++) {
st.stopMongos(0);
// Start balancer, which lets the stale mongos balance
-assert.writeOK(staleMongos.getDB("config")
- .settings.update({_id: "balancer"}, {$set: {stopped: false}}, true));
+assert.writeOK(
+ staleMongos.getDB("config").settings.update({_id: "balancer"}, {$set: {stopped: false}}, true));
// Make sure we eventually start moving chunks
assert.soon(function() {
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index dc547ddad74..d545ea096d6 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -182,13 +182,15 @@
printjson(db.foo6.getIndexes());
assert.eq(2,
- db.foo6.group({
- key: {a: 1},
- initial: {count: 0},
- reduce: function(z, prev) {
- prev.count++;
- }
- }).length);
+ db.foo6
+ .group({
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(z, prev) {
+ prev.count++;
+ }
+ })
+ .length);
assert.eq(3, db.foo6.find().count());
assert(s.admin.runCommand({shardcollection: "test.foo6", key: {a: 1}}).ok);
@@ -202,11 +204,8 @@
// Remove when SERVER-10232 is fixed
assert.soon(function() {
- var cmdRes = s.admin.runCommand({
- movechunk: "test.foo6",
- find: {a: 3},
- to: s.getOther(s.getPrimaryShard("test")).name
- });
+ var cmdRes = s.admin.runCommand(
+ {movechunk: "test.foo6", find: {a: 3}, to: s.getOther(s.getPrimaryShard("test")).name});
return cmdRes.ok;
}, 'move chunk test.foo6', 60000, 1000);
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index 010289ac1cc..fd28882213b 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -86,9 +86,7 @@
for (var i = 0; i < values.length; i++) {
total += values[i].count;
}
- return {
- count: total
- };
+ return {count: total};
};
doMR = function(n) {
@@ -146,12 +144,7 @@
doMR("after extra split");
- cmd = {
- mapreduce: "mr",
- map: "emit( ",
- reduce: "fooz + ",
- out: "broken1"
- };
+ cmd = {mapreduce: "mr", map: "emit( ", reduce: "fooz + ", out: "broken1"};
x = db.runCommand(cmd);
y = s._connections[0].getDB("test").runCommand(cmd);
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index e6bf01503be..8ce5c3383ad 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -51,8 +51,9 @@ assert.throws(function() {
});
// Projection specified with incorrect field name.
-cursor = coll.find({$text: {$search: "pizza"}}, {t: {$meta: "textScore"}})
- .sort({s: {$meta: "textScore"}});
+cursor = coll.find({$text: {$search: "pizza"}}, {t: {$meta: "textScore"}}).sort({
+ s: {$meta: "textScore"}
+});
assert.throws(function() {
cursor.next();
});
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index 0229c84555c..707d3c550a0 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -38,9 +38,7 @@
printShardingSizes();
- var opts = {
- sharded: true
- };
+ var opts = {sharded: true};
test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index cdf8543274a..4833f5bc0d0 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -36,11 +36,7 @@
// Turn balancer back on, for actual tests
// s.startBalancer(); // SERVER-13365
- opts = {
- sphere: 0,
- nToTest: test.nPts * 0.01,
- sharded: true
- };
+ opts = {sphere: 0, nToTest: test.nPts * 0.01, sharded: true};
test.testPt([0, 0], opts);
test.testPt(test.mkPt(), opts);
test.testPt(test.mkPt(), opts);
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index 54bda17cf16..123b4b174cc 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -39,12 +39,7 @@ function test(db, sharded, indexType) {
assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
var queryPoint = [0, 0];
- geoCmd = {
- geoNear: coll,
- near: queryPoint,
- spherical: true,
- includeLocs: true
- };
+ geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
assert.commandWorked(db.runCommand(geoCmd), tojson({sharded: sharded, indexType: indexType}));
}
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 0d9221ed896..2a18cd7a22e 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -40,12 +40,13 @@
// Should not throw exception, since slaveOk'd
assert.eq(10,
coll.group({
- key: {i: true},
- reduce: function(obj, ctx) {
- ctx.count += 1;
- },
- initial: {count: 0}
- }).length);
+ key: {i: true},
+ reduce: function(obj, ctx) {
+ ctx.count += 1;
+ },
+ initial: {count: 0}
+ })
+ .length);
try {
conn.setSlaveOk(false);
diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js
index 10ab1b1308b..21b69472e3c 100644
--- a/jstests/sharding/hash_shard1.js
+++ b/jstests/sharding/hash_shard1.js
@@ -35,8 +35,8 @@ assert.neq(chunk, null, "all chunks on shard0000!");
printjson(chunk);
// try to move the chunk using an invalid specification method. should fail.
-var res = db.adminCommand(
- {movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"});
+var res =
+ db.adminCommand({movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"});
assert.eq(res.ok, 0, "moveChunk shouldn't work with invalid specification method");
// now move a chunk using the lower/upper bound method. should work.
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 766bd96e260..9b57c3e43f8 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -118,9 +118,11 @@
// Make sure the index created is unique!
assert.eq(1,
- coll.getIndexes().filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
}
if (i == 7) {
coll.remove({});
@@ -156,9 +158,11 @@
// Make sure the index created is unique!
assert.eq(1,
- coll.getIndexes().filter(function(z) {
- return friendlyEqual(z.key, {num: 1}) && z.unique;
- }).length);
+ coll.getIndexes()
+ .filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ })
+ .length);
}
if (i == 9) {
// Unique index exists on a different field as well
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 93ce1b4d64a..96e351c25e8 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -115,9 +115,7 @@
function makeInQuery() {
if (curT.compound) {
// cheating a bit...
- return {
- 'o.a': {$in: [1, 2]}
- };
+ return {'o.a': {$in: [1, 2]}};
} else {
return makeObjectDotted({$in: curT.values});
}
@@ -178,26 +176,26 @@
assert.eq(
6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
- assert.eq(
- 2,
- c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .count(),
- curT.name + " $or count()");
- assert.eq(
- 2,
- c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .itcount(),
- curT.name + " $or itcount()");
- assert.eq(
- 4,
- c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .count(),
- curT.name + " $nor count()");
- assert.eq(
- 4,
- c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
- .itcount(),
- curT.name + " $nor itcount()");
+ assert.eq(2,
+ c.find({
+ $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).count(),
+ curT.name + " $or count()");
+ assert.eq(2,
+ c.find({
+ $or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $or itcount()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).count(),
+ curT.name + " $nor count()");
+ assert.eq(4,
+ c.find({
+ $nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]
+ }).itcount(),
+ curT.name + " $nor itcount()");
var stats = c.stats();
printjson(stats);
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index 414e056bf1f..43a270e0175 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -49,12 +49,9 @@
}),
"sort 1");
assert.eq("sara,mark,joe,eliot,bob,allan",
- db.foo.find()
- .sort({name: -1})
- .toArray()
- .map(function(z) {
- return z.name;
- }),
+ db.foo.find().sort({name: -1}).toArray().map(function(z) {
+ return z.name;
+ }),
"sort 2");
// make sure we can't force a split on an extreme key
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index 5c28f79f24f..92f0453b941 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -9,8 +9,8 @@
var configSecondaryToKill = configSecondaryList[0];
var delayedConfigSecondary = configSecondaryList[1];
- delayedConfigSecondary.getDB('admin')
- .adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
+ delayedConfigSecondary.getDB('admin').adminCommand(
+ {configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
var testDB = st.s.getDB('test');
testDB.adminCommand({enableSharding: 'test'});
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 055b5c8b788..b681f328298 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -31,9 +31,7 @@
// The query is asking for the maximum value below a given value
// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
- q = {
- x: {$lt: 60}
- };
+ q = {x: {$lt: 60}};
// Make sure the basic queries are correct
assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 459e627fb3f..b36972da685 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -107,17 +107,13 @@ var assertCannotRunCommands = function(mongo, st) {
{param: "userCacheInvalidationIntervalSecs", val: 300}
];
params.forEach(function(p) {
- var cmd = {
- setParameter: 1
- };
+ var cmd = {setParameter: 1};
cmd[p.param] = p.val;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = {
- getParameter: 1
- };
+ var cmd = {getParameter: 1};
cmd[p.param] = 1;
assert.commandFailedWithCode(
mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 7194c98750c..36ccf7d1d6f 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -160,17 +160,16 @@
// Positive test for "mapReduce".
configureMaxTimeAlwaysTimeOut("alwaysOn");
- res = coll.runCommand("mapReduce",
- {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- });
+ res = coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ });
assert.commandFailed(
res, "expected mapReduce to fail in mongod due to maxTimeAlwaysTimeOut fail point");
assert.eq(
@@ -180,17 +179,16 @@
// Negative test for "mapReduce".
configureMaxTimeAlwaysTimeOut("off");
- assert.commandWorked(coll.runCommand("mapReduce",
- {
- map: function() {
- emit(0, 0);
- },
- reduce: function(key, values) {
- return 0;
- },
- out: {inline: 1},
- maxTimeMS: 60 * 1000
- }),
+ assert.commandWorked(coll.runCommand("mapReduce", {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ }),
"expected mapReduce to not hit time limit in mongod");
// Positive test for "aggregate".
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 8895d14c0d6..01260123b67 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -42,11 +42,8 @@
s.printShardingStatus();
assert.throws(function() {
- s.adminCommand({
- movechunk: "test.foo",
- find: {x: 50},
- to: s.getOther(s.getPrimaryShard("test")).name
- });
+ s.adminCommand(
+ {movechunk: "test.foo", find: {x: 50}, to: s.getOther(s.getPrimaryShard("test")).name});
}, [], "move should fail");
for (i = 0; i < 20; i += 2) {
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 6198c8d2cef..aee7fdc97db 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -53,8 +53,8 @@
assert.neq(version.global, failVersion.global);
- assert.commandWorked(st.shard0.getDB("admin")
- .runCommand({configureFailPoint: 'failApplyChunkOps', mode: 'off'}));
+ assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failApplyChunkOps', mode: 'off'}));
assert.commandWorked(st.shard0.getDB("admin").runCommand(
{configureFailPoint: 'failCommitMigrationCommand', mode: 'off'}));
diff --git a/jstests/sharding/min_optime_recovery.js b/jstests/sharding/min_optime_recovery.js
index d77f1e2ad42..69ccc78f02c 100644
--- a/jstests/sharding/min_optime_recovery.js
+++ b/jstests/sharding/min_optime_recovery.js
@@ -47,12 +47,10 @@
assert.eq(null, doc);
}
- var restartCmdLineOptions = Object.merge(
- st.d0.fullOptions,
- {
- setParameter: 'recoverShardingState=' + (withRecovery ? 'true' : 'false'),
- restart: true
- });
+ var restartCmdLineOptions = Object.merge(st.d0.fullOptions, {
+ setParameter: 'recoverShardingState=' + (withRecovery ? 'true' : 'false'),
+ restart: true
+ });
// Restart the shard that donated a chunk to trigger the optime recovery logic.
st.stopMongod(0);
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 3d9af893b55..53809fd88aa 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -10,7 +10,11 @@ load("jstests/replsets/rslib.js");
mongos: 1,
other: {
rs0: {
- nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}, ],
+ nodes: [
+ {},
+ {rsConfig: {priority: 0}},
+ {rsConfig: {priority: 0}},
+ ],
}
}
});
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index 8eaf9653f11..0acb2cc5609 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -11,11 +11,7 @@
// (connection connected after shard change).
//
-var options = {
- rs: true,
- rsOptions: {nodes: 2},
- keyFile: "jstests/libs/key1"
-};
+var options = {rs: true, rsOptions: {nodes: 2}, keyFile: "jstests/libs/key1"};
var st = new ShardingTest({shards: 3, mongos: 1, other: options});
@@ -82,9 +78,7 @@ authDBUsers(mongosConnActive);
var mongosConnIdle = null;
var mongosConnNew = null;
-var wc = {
- writeConcern: {w: 2, wtimeout: 60000}
-};
+var wc = {writeConcern: {w: 2, wtimeout: 60000}};
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index e24566605ce..f811c9ad443 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -50,9 +50,7 @@
var mongosConnIdle = null;
var mongosConnNew = null;
- var wc = {
- writeConcern: {w: 2, wtimeout: 60000}
- };
+ var wc = {writeConcern: {w: 2, wtimeout: 60000}};
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 73455666635..b3e480ae3ed 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -32,8 +32,8 @@
// Create the unsharded database
assert.writeOK(collUnsharded.insert({some: "doc"}));
assert.writeOK(collUnsharded.remove({}));
- assert.commandWorked(admin.runCommand(
- {movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
+ assert.commandWorked(
+ admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
//
// Setup is complete
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 69bdf4d8c90..267ca74f718 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -33,10 +33,10 @@
assert(aChunk);
// Error if either of the bounds is not a valid shard key (BSON object - 1 yields a NaN)
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
- assert.commandFailed(mongos.adminCommand(
- {moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min - 1, aChunk.max], to: shard1}));
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
// Fail if find and bounds are both set.
assert.commandFailed(mongos.adminCommand(
diff --git a/jstests/sharding/move_chunk_wc.js b/jstests/sharding/move_chunk_wc.js
index 9f7d4b2b646..1ebf1be6b36 100644
--- a/jstests/sharding/move_chunk_wc.js
+++ b/jstests/sharding/move_chunk_wc.js
@@ -57,10 +57,7 @@ load('jstests/libs/write_concern_util.js');
_waitForDelete: true
};
- req.writeConcern = {
- w: 1,
- wtimeout: 30000
- };
+ req.writeConcern = {w: 1, wtimeout: 30000};
jsTest.log("Testing " + tojson(req));
var res = db.adminCommand(req);
assert.commandWorked(res);
@@ -68,10 +65,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(2, 0);
// This should pass because w: majority is always passed to config servers.
- req.writeConcern = {
- w: 2,
- wtimeout: 30000
- };
+ req.writeConcern = {w: 2, wtimeout: 30000};
jsTest.log("Testing " + tojson(req));
req.to = s1;
res = db.adminCommand(req);
@@ -80,10 +74,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(1, 1);
// This should fail because the writeConcern cannot be satisfied on the to shard.
- req.writeConcern = {
- w: 4,
- wtimeout: 3000
- };
+ req.writeConcern = {w: 4, wtimeout: 3000};
jsTest.log("Testing " + tojson(req));
req.to = s0;
res = db.adminCommand(req);
@@ -92,10 +83,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(1, 1);
// This should fail because the writeConcern cannot be satisfied on the from shard.
- req.writeConcern = {
- w: 6,
- wtimeout: 3000
- };
+ req.writeConcern = {w: 6, wtimeout: 3000};
jsTest.log("Testing " + tojson(req));
req.to = s0;
res = db.adminCommand(req);
@@ -104,10 +92,7 @@ load('jstests/libs/write_concern_util.js');
checkChunkCount(1, 1);
// This should fail because the writeConcern is invalid and cannot be satisfied anywhere.
- req.writeConcern = {
- w: "invalid",
- wtimeout: 3000
- };
+ req.writeConcern = {w: "invalid", wtimeout: 3000};
jsTest.log("Testing " + tojson(req));
req.to = s0;
res = db.adminCommand(req);
diff --git a/jstests/sharding/move_stale_mongos.js b/jstests/sharding/move_stale_mongos.js
index 63482d894c3..ceb805ee674 100644
--- a/jstests/sharding/move_stale_mongos.js
+++ b/jstests/sharding/move_stale_mongos.js
@@ -17,12 +17,8 @@ for (var i = 0; i < 100; i += 10) {
assert.commandWorked(st.s0.getDB('admin').runCommand({split: testNs, middle: {_id: i}}));
st.configRS.awaitLastOpCommitted(); // Ensure that other mongos sees the split
var nextShardIndex = (curShardIndex + 1) % shards.length;
- assert.commandWorked(st.s1.getDB('admin').runCommand({
- moveChunk: testNs,
- find: {_id: i + 5},
- to: shards[nextShardIndex],
- _waitForDelete: true
- }));
+ assert.commandWorked(st.s1.getDB('admin').runCommand(
+ {moveChunk: testNs, find: {_id: i + 5}, to: shards[nextShardIndex], _waitForDelete: true}));
curShardIndex = nextShardIndex;
st.configRS.awaitLastOpCommitted(); // Ensure that other mongos sees the move
}
diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js
index a6f4704ec90..52597fec149 100644
--- a/jstests/sharding/movechunk_with_default_paranoia.js
+++ b/jstests/sharding/movechunk_with_default_paranoia.js
@@ -10,9 +10,11 @@ var shards = [st.shard0, st.shard1];
for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
var hasMoveChunkDir = 0 !=
- ls(dbpath).filter(function(a) {
- return null != a.match("moveChunk");
- }).length;
+ ls(dbpath)
+ .filter(function(a) {
+ return null != a.match("moveChunk");
+ })
+ .length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index 96348d827bf..f8c2fd0fbd8 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -12,9 +12,11 @@ var foundMoveChunk = false;
for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
var hasMoveChunkDir = 0 !=
- ls(dbpath).filter(function(a) {
- return null != a.match("moveChunk");
- }).length;
+ ls(dbpath)
+ .filter(function(a) {
+ return null != a.match("moveChunk");
+ })
+ .length;
foundMoveChunk = foundMoveChunk || hasMoveChunkDir;
}
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index ae8ef5899a8..4e75421543a 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -11,9 +11,11 @@ var shards = [st.shard0, st.shard1];
for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
var hasMoveChunkDir = 0 !=
- ls(dbpath).filter(function(a) {
- return null != a.match("moveChunk");
- }).length;
+ ls(dbpath)
+ .filter(function(a) {
+ return null != a.match("moveChunk");
+ })
+ .length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index ab3bae28d74..39def771ef9 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -64,9 +64,8 @@ assert.eq(numDocs,
// Make sure it's sharded and split
var newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks,
- 1,
- "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
+assert.gt(
+ newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
// Check that there are no "jumbo" chunks.
var objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
@@ -74,13 +73,11 @@ var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
st.printShardingStatus(true);
-config.chunks.find({ns: testDB.mrShardedOut.getFullName()})
- .forEach(function(chunkDoc) {
- var count =
- testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}})
- .itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
- });
+config.chunks.find({ns: testDB.mrShardedOut.getFullName()}).forEach(function(chunkDoc) {
+ var count =
+ testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}}).itcount();
+ assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
+});
// Check that chunks for the newly created sharded output collection are well distributed.
var shard0Chunks =
@@ -127,9 +124,8 @@ assert.eq(numDocs,
// Make sure it's sharded and split
newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks,
- 1,
- "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
+assert.gt(
+ newNumChunks, 1, "Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
st.printShardingStatus(true);
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index fc2f7f02e4b..5040fc17ef2 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -31,11 +31,7 @@
jsTest.log("Starting migrations...");
- var migrateOp = {
- op: "command",
- ns: "admin",
- command: {moveChunk: "" + coll}
- };
+ var migrateOp = {op: "command", ns: "admin", command: {moveChunk: "" + coll}};
var checkMigrate = function() {
print("Result of migrate : ");
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index 2112408afc0..dcbd5a66655 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -28,9 +28,9 @@
assert.eq(1, res.ok, tojson(res));
printjson(s2.adminCommand({"getShardVersion": "test.existing"}));
- printjson(new Mongo(s1.getPrimaryShard("test").name)
- .getDB("admin")
- .adminCommand({"getShardVersion": "test.existing"}));
+ printjson(new Mongo(s1.getPrimaryShard("test").name).getDB("admin").adminCommand({
+ "getShardVersion": "test.existing"
+ }));
assert.eq(1, s1.getDB('test').existing.count({_id: 1})); // SERVER-2828
assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
diff --git a/jstests/sharding/no_empty_reset.js b/jstests/sharding/no_empty_reset.js
index 61fe5905cc0..da77597ecae 100644
--- a/jstests/sharding/no_empty_reset.js
+++ b/jstests/sharding/no_empty_reset.js
@@ -31,12 +31,8 @@ var emptyShard = st.getShard(coll, {_id: -1});
var admin = st.s.getDB("admin");
assert.soon(
function() {
- var result = admin.runCommand({
- moveChunk: "" + coll,
- find: {_id: -1},
- to: fullShard.shardName,
- _waitForDelete: true
- });
+ var result = admin.runCommand(
+ {moveChunk: "" + coll, find: {_id: -1}, to: fullShard.shardName, _waitForDelete: true});
jsTestLog('moveChunk result = ' + tojson(result));
return result.ok;
},
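
For reference, the moveChunk invocation being reflowed here has this general shape (namespace and shard name are placeholders; _waitForDelete blocks until the donor finishes deleting the migrated range):

    var res = st.s.getDB("admin").runCommand({
        moveChunk: "test.coll",   // full namespace (placeholder)
        find: {_id: -1},          // any key falling inside the chunk to move
        to: "shard0000",          // destination shard name (placeholder)
        _waitForDelete: true      // wait for the donor's range deletion
    });
    assert.commandWorked(res);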
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index 21107fe370d..96089b6d491 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -23,10 +23,10 @@
assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
function getMetadata(shard) {
var admin = shard.getDB('admin'),
@@ -50,8 +50,8 @@
assert.neq(metadata.collVersion.t, 0);
assert.eq(metadata.pending.length, 0);
- assert.commandWorked(admin.runCommand(
- {moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
metadata = getMetadata(shard0);
assert.eq(metadata.shardVersion.t, 0);
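
The getMetadata helper whose opening appears above presumably wraps a call of this shape; fullMetadata: true returns the shard's cached chunk state, including the pending (incoming) ranges the assertions inspect:

    function getMetadata(shard) {
        var res = shard.getDB('admin').runCommand(
            {getShardVersion: ns, fullMetadata: true});
        assert.commandWorked(res);
        return res.metadata;  // contains shardVersion, collVersion, pending, ...
    }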
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 92a077d73b5..a13b133e3ef 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -109,27 +109,23 @@
_waitForDelete: true
}));
- var expectedShardCount = {
- shard0000: 0,
- shard0001: 0
- };
- config.chunks.find({ns: 'test.user'})
- .forEach(function(chunkDoc) {
- var min = chunkDoc.min.num;
- var max = chunkDoc.max.num;
-
- if (min < 0 || min == MinKey) {
- min = 0;
- }
+ var expectedShardCount = {shard0000: 0, shard0001: 0};
+ config.chunks.find({ns: 'test.user'}).forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
- if (max > 1000 || max == MaxKey) {
- max = 1000;
- }
+ if (min < 0 || min == MinKey) {
+ min = 0;
+ }
- if (max > 0) {
- expectedShardCount[chunkDoc.shard] += (max - min);
- }
- });
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
+ }
+
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
+ }
+ });
assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 05e6eca0d4f..63b5ef3090c 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -162,10 +162,7 @@
};
var collName = getCollName(testCollDetailsNum);
- var cmdObj = {
- shardCollection: collName,
- key: {_id: 1}
- };
+ var cmdObj = {shardCollection: collName, key: {_id: 1}};
if (args.unique) {
cmdObj.unique = true;
}
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index c6b08b8b7c0..7547eef007a 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -164,12 +164,13 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = configDB.collections.aggregate([
- {$match: {"key.b": 1}},
- {$sort: {"_id": 1}},
- {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
- ],
- {cursor: {batchSize: 2}});
+ cursor = configDB.collections.aggregate(
+ [
+ {$match: {"key.b": 1}},
+ {$sort: {"_id": 1}},
+ {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
+ ],
+ {cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
assert.eq(cursor.next(), {_id: testNamespaces[2], keyb: 1});
@@ -220,8 +221,9 @@
st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
// Find query.
- cursor = configDB.chunks.find({ns: testColl.getFullName()},
- {_id: 0, min: 1, max: 1, shard: 1}).sort({"min.e": 1});
+ cursor =
+ configDB.chunks.find({ns: testColl.getFullName()}, {_id: 0, min: 1, max: 1, shard: 1})
+ .sort({"min.e": 1});
assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
@@ -258,9 +260,7 @@
}
};
var reduceFunction = function(key, values) {
- return {
- chunks: values.length
- };
+ return {chunks: values.length};
};
result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
@@ -322,13 +322,14 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = userColl.aggregate([
- {$match: {c: {$gt: 1}}},
- {$unwind: "$u"},
- {$group: {_id: "$u", sum: {$sum: "$c"}}},
- {$sort: {_id: 1}}
- ],
- {cursor: {batchSize: 2}});
+ cursor = userColl.aggregate(
+ [
+ {$match: {c: {$gt: 1}}},
+ {$unwind: "$u"},
+ {$group: {_id: "$u", sum: {$sum: "$c"}}},
+ {$sort: {_id: 1}}
+ ],
+ {cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: 1, sum: 11});
assert.eq(cursor.next(), {_id: 2, sum: 15});
@@ -365,18 +366,15 @@
emit(this.g, 1);
};
var reduceFunction = function(key, values) {
- return {
- count: values.length
- };
+ return {count: values.length};
};
result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results),
- [
- {_id: 1, value: {count: 2}},
- {_id: 2, value: {count: 3}},
- {_id: 3, value: {count: 2}}
- ]);
+ assert.eq(sortArrayById(result.results), [
+ {_id: 1, value: {count: 2}},
+ {_id: 2, value: {count: 3}},
+ {_id: 3, value: {count: 2}}
+ ]);
assert(userColl.drop());
};
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index aadd8903344..973f02993ac 100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -3,9 +3,7 @@
* can be found in dbtests/replica_set_monitor_test.cpp.
*/
-var PRI_TAG = {
- dc: 'ny'
-};
+var PRI_TAG = {dc: 'ny'};
var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
var NODES = SEC_TAGS.length + 1;
@@ -95,9 +93,7 @@ var doTest = function(useDollarQuerySyntax) {
var getExplain = function(readPrefMode, readPrefTags) {
if (useDollarQuerySyntax) {
- var readPrefObj = {
- mode: readPrefMode
- };
+ var readPrefObj = {mode: readPrefMode};
if (readPrefTags) {
readPrefObj.tags = readPrefTags;
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 1e4aa48ee25..3cf6a694cd0 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -60,9 +60,7 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
assert(cmdResult.ok);
var testedAtLeastOnce = false;
- var query = {
- op: 'command'
- };
+ var query = {op: 'command'};
Object.extend(query, profileQuery);
hostList.forEach(function(node) {
@@ -216,28 +214,28 @@ var testAllModes = function(conn, hostList, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
@@ -248,17 +246,17 @@ var testAllModes = function(conn, hostList, isMongos) {
});
[
- // Tags not allowed with primary
- ['primary', [{dc: 'doesntexist'}]],
- ['primary', [{dc: 'ny'}]],
- ['primary', [{dc: 'one'}]],
+ // Tags not allowed with primary
+ ['primary', [{dc: 'doesntexist'}]],
+ ['primary', [{dc: 'ny'}]],
+ ['primary', [{dc: 'one'}]],
- // No matching node
- ['secondary', [{tag: 'one'}]],
- ['nearest', [{tag: 'doesntexist'}]],
+ // No matching node
+ ['secondary', [{tag: 'one'}]],
+ ['nearest', [{tag: 'doesntexist'}]],
- ['invalid-mode', undefined],
- ['secondary', ['misformatted-tags']]
+ ['invalid-mode', undefined],
+ ['secondary', ['misformatted-tags']]
].forEach(function(args) {
var mode = args[0], tagSets = args[1];
@@ -278,14 +276,8 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = {
- dc: 'ny',
- tag: 'one'
-};
-var SECONDARY_TAG = {
- dc: 'ny',
- tag: 'two'
-};
+var PRIMARY_TAG = {dc: 'ny', tag: 'one'};
+var SECONDARY_TAG = {dc: 'ny', tag: 'two'};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
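
The mode/tag-set pairs tabulated above are what a client sends through the shell's setReadPref helper; a minimal sketch, assuming a mongos connection (database and collection names are placeholders):

    var conn = new Mongo(st.s.host);
    // Route reads to a tagged secondary; tag sets are tried in order.
    conn.setReadPref('secondary', [{tag: 'two'}]);
    var docs = conn.getDB('test').user.find().toArray();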
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index 7dd927d8aab..33411447721 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -162,17 +162,14 @@ collSharded.remove({});
collCompound.remove({});
collNested.remove({});
assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
-assert.writeError(collCompound.update({a: /abcde.*/},
- {$set: {a: /abcde.*/, b: 1}},
- {upsert: true}));
+assert.writeError(
+ collCompound.update({a: /abcde.*/}, {$set: {a: /abcde.*/, b: 1}}, {upsert: true}));
// Exact regex in query never equality
-assert.writeError(collNested.update({'a.b': /abcde.*/},
- {$set: {'a.b': /abcde.*/}},
- {upsert: true}));
+assert.writeError(
+ collNested.update({'a.b': /abcde.*/}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
// Even nested regexes are not extracted in queries
-assert.writeError(collNested.update({a: {b: /abcde.*/}},
- {$set: {'a.b': /abcde.*/}},
- {upsert: true}));
+assert.writeError(
+ collNested.update({a: {b: /abcde.*/}}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
//
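
The write errors asserted here all stem from the same rule: a regex in the query is never treated as an equality on the shard key, so mongos has no single shard-key value with which to target the upsert. A contrasting sketch (collection assumed sharded on {a: 1}, as in this test):

    // A plain string equality can be targeted, so the upsert is accepted...
    assert.writeOK(collSharded.update({a: 'abcde-1'}, {$set: {updated: true}}, {upsert: true}));
    // ...while the regex form cannot be routed to one shard and is rejected.
    assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {upsert: true}));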
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 1d52ac47abc..95d6d7526a8 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -13,9 +13,7 @@
* was able to refresh before proceeding to check.
*/
-var rsOpt = {
- oplogSize: 10
-};
+var rsOpt = {oplogSize: 10};
var st = new ShardingTest({shards: 1, rs: rsOpt});
var mongos = st.s;
var replTest = st.rs0;
diff --git a/jstests/sharding/secondary_query_routing.js b/jstests/sharding/secondary_query_routing.js
index ff0dfcb22d9..3eb706022aa 100644
--- a/jstests/sharding/secondary_query_routing.js
+++ b/jstests/sharding/secondary_query_routing.js
@@ -4,9 +4,7 @@
*/
(function() {
- var rsOpts = {
- nodes: 2
- };
+ var rsOpts = {nodes: 2};
var st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
st.s0.adminCommand({enableSharding: 'test'});
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index abe91508650..6f563aebbb0 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -40,9 +40,8 @@ db.foo.save({num: 1, name: "eliot"});
db.foo.save({num: 2, name: "sara"});
db.foo.save({num: -1, name: "joe"});
-assert.eq(3,
- s.getPrimaryShard("test").getDB("test").foo.find().length(),
- "not right directly to db A");
+assert.eq(
+ 3, s.getPrimaryShard("test").getDB("test").foo.find().length(), "not right directly to db A");
assert.eq(3, db.foo.find().length(), "not right on shard");
primary = s.getPrimaryShard("test").getDB("test");
@@ -58,12 +57,8 @@ placeCheck(2);
// test move shard
assert.throws(function() {
- s.adminCommand({
- movechunk: "test.foo",
- find: {num: 1},
- to: primary.getMongo().name,
- _waitForDelete: true
- });
+ s.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: primary.getMongo().name, _waitForDelete: true});
});
assert.throws(function() {
s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true});
@@ -74,9 +69,8 @@ s.adminCommand(
assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
-assert.eq(2,
- s.config.chunks.count(),
- "still should have 2 shards after move not:" + s.getChunksString());
+assert.eq(
+ 2, s.config.chunks.count(), "still should have 2 shards after move not:" + s.getChunksString());
chunks = s.config.chunks.find().toArray();
assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
diff --git a/jstests/sharding/shard_aware_init.js b/jstests/sharding/shard_aware_init.js
index 199eb369557..5dda623bb77 100644
--- a/jstests/sharding/shard_aware_init.js
+++ b/jstests/sharding/shard_aware_init.js
@@ -40,8 +40,8 @@
mongodConn = MongoRunner.runMongod(options);
waitForMaster(mongodConn);
- var res = mongodConn.getDB('admin')
- .system.version.update({_id: 'shardIdentity'}, shardIdentityDoc);
+ var res = mongodConn.getDB('admin').system.version.update({_id: 'shardIdentity'},
+ shardIdentityDoc);
assert.eq(1, res.nModified);
MongoRunner.stopMongod(mongodConn.port);
@@ -60,8 +60,8 @@
return mongodConn;
};
- assert.writeOK(mongodConn.getDB('admin')
- .system.version.update({_id: 'shardIdentity'}, shardIdentityDoc, true));
+ assert.writeOK(mongodConn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, shardIdentityDoc, true));
var res = mongodConn.getDB('admin').runCommand({shardingState: 1});
@@ -98,8 +98,7 @@
waitForMaster(mongodConn);
assert.writeOK(mongodConn.getDB('admin').system.version.update(
- {_id: 'shardIdentity'},
- {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
+ {_id: 'shardIdentity'}, {_id: 'shardIdentity', shardName: 'x', clusterId: ObjectId()}));
MongoRunner.stopMongod(mongodConn.port);
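
For context, the shardIdentity document written in these hunks has roughly this shape; configsvrConnectionString and the getURL helper are assumptions, while the other fields appear in the surrounding diff:

    var shardIdentityDoc = {
        _id: 'shardIdentity',                             // fixed _id; one per shard
        configsvrConnectionString: st.configRS.getURL(),  // assumed field and helper
        shardName: 'shard0000',                           // placeholder
        clusterId: ObjectId()
    };
    // Upsert (third argument true) so the document is created if absent.
    assert.writeOK(conn.getDB('admin').system.version.update(
        {_id: 'shardIdentity'}, shardIdentityDoc, true));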
diff --git a/jstests/sharding/shard_aware_primary_failover.js b/jstests/sharding/shard_aware_primary_failover.js
index 0d939c6e1ea..127e74b948c 100644
--- a/jstests/sharding/shard_aware_primary_failover.js
+++ b/jstests/sharding/shard_aware_primary_failover.js
@@ -29,8 +29,8 @@
clusterId: ObjectId()
};
- assert.writeOK(primaryConn.getDB('admin')
- .system.version.insert(shardIdentityDoc, {writeConcern: {w: 'majority'}}));
+ assert.writeOK(primaryConn.getDB('admin').system.version.insert(
+ shardIdentityDoc, {writeConcern: {w: 'majority'}}));
replTest.stopMaster();
replTest.waitForMaster();
diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js
index 4eb142d7f20..678a04c79fa 100644
--- a/jstests/sharding/shard_identity_config_update.js
+++ b/jstests/sharding/shard_identity_config_update.js
@@ -19,8 +19,8 @@
clusterId: ObjectId()
};
- var res = conn.getDB('admin')
- .system.version.update({_id: 'shardIdentity'}, shardIdentityDoc, true);
+ var res = conn.getDB('admin').system.version.update(
+ {_id: 'shardIdentity'}, shardIdentityDoc, true);
assert.eq(1, res.nUpserted);
};
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index d2fad545bba..22a8aaa6210 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -39,16 +39,12 @@
assert.lt(20, diff1(), "big differential here");
print(diff1());
- assert.soon(
- function() {
- var d = diff1();
- return d < 5;
- // Make sure there's enough time here, since balancing can sleep for 15s or so between
- // balances.
- },
- "balance didn't happen",
- 1000 * 60 * 5,
- 5000);
+ assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+ // Make sure there's enough time here, since balancing can sleep for 15s or so between
+ // balances.
+ }, "balance didn't happen", 1000 * 60 * 5, 5000);
s.stop();
})();
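
assert.soon retries its predicate until it returns truthy or the timeout expires; the reformatted call above packs the message, timeout, and interval onto the trailing line. Its four-argument shape, as a sketch:

    assert.soon(function() {
        return diff1() < 5;        // predicate, polled repeatedly
    },
    "balance didn't happen",       // message printed on timeout
    1000 * 60 * 5,                 // timeout: five minutes
    5000);                         // poll interval: five seconds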
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 91c9d5ce9ad..170448cbf11 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -35,13 +35,9 @@
//
function doUpdate(bulk, includeString, optionalId) {
- var up = {
- $inc: {x: 1}
- };
+ var up = {$inc: {x: 1}};
if (includeString) {
- up["$set"] = {
- s: bigString
- };
+ up["$set"] = {s: bigString};
}
var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
bulk.find({_id: myid}).upsert().update(up);
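
doUpdate queues its work through the shell's fluent bulk API; a minimal self-contained sketch (database and collection names are placeholders):

    var bulk = db.getSiblingDB('test').foo.initializeUnorderedBulkOp();
    // Queue an upsert: increment x, inserting the document if _id 42 is absent.
    bulk.find({_id: 42}).upsert().update({$inc: {x: 1}});
    assert.writeOK(bulk.execute());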
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 0841967b18e..333b53e9cfd 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -4,9 +4,7 @@ load('jstests/libs/command_line/test_parsed_options.js');
// Move Paranoia
jsTest.log("Testing \"moveParanoia\" command line option");
-var expectedResult = {
- "parsed": {"sharding": {"archiveMovedChunks": true}}
-};
+var expectedResult = {"parsed": {"sharding": {"archiveMovedChunks": true}}};
testGetCmdLineOptsMongod({moveParanoia: ""}, expectedResult);
jsTest.log("Testing \"noMoveParanoia\" command line option");
@@ -51,9 +49,7 @@ testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_shardingrole.js
// Auto Splitting
jsTest.log("Testing \"noAutoSplit\" command line option");
-var expectedResult = {
- "parsed": {"sharding": {"autoSplit": false}}
-};
+var expectedResult = {"parsed": {"sharding": {"autoSplit": false}}};
testGetCmdLineOptsMongos({noAutoSplit: ""}, expectedResult);
jsTest.log("Testing \"sharding.autoSplit\" config file option");
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index 7c323ac5d44..ed15592a9b2 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -19,10 +19,16 @@
other: {
chunkSize: 1,
rs0: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
},
rs1: {
- nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ nodes: [
+ {rsConfig: {votes: 1}},
+ {rsConfig: {priority: 0, votes: 0}},
+ ],
}
}
});
diff --git a/jstests/sharding/sharding_state_after_stepdown.js b/jstests/sharding/sharding_state_after_stepdown.js
index 3007b4b08a2..6bd2f4927cc 100644
--- a/jstests/sharding/sharding_state_after_stepdown.js
+++ b/jstests/sharding/sharding_state_after_stepdown.js
@@ -38,22 +38,12 @@
st.rs0.stop(rs0Primary);
st.rs1.stop(rs1Primary);
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- ok:
- false
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {ok: false});
st.rs0.start(rs0Primary, Object.extend(rs0Primary.savedOptions, {restart: true}));
st.rs1.start(rs1Primary, Object.extend(rs1Primary.savedOptions, {restart: true}));
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- ismaster:
- true
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {ismaster: true});
};
restartPrimaries();
@@ -109,12 +99,7 @@
// Expected connection exception, will check for stepdown later
}
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- secondary:
- true
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {secondary: true});
assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({replSetFreeze: 0}));
assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({replSetFreeze: 0}));
@@ -126,12 +111,7 @@
assert.commandWorked(rs0Primary.adminCommand({connPoolSync: true}));
assert.commandWorked(rs1Primary.adminCommand({connPoolSync: true}));
- ReplSetTest.awaitRSClientHosts(mongos,
- [rs0Primary, rs1Primary],
- {
- ismaster:
- true
- });
+ ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], {ismaster: true});
};
stepDownPrimaries();
@@ -140,30 +120,26 @@
//
// No sharding metadata until shards are hit by a metadata operation
assert.eq({},
- st.rs0.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs0.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
assert.eq({},
- st.rs1.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs1.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
//
//
// Metadata commands should enable sharding data implicitly
assert.commandWorked(mongos.adminCommand({split: collSharded.toString(), middle: {_id: 0}}));
assert.eq({},
- st.rs0.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs0.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
assert.neq({},
- st.rs1.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs1.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
//
//
@@ -171,15 +147,13 @@
assert.commandWorked(mongos.adminCommand(
{moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[0]._id}));
assert.neq({},
- st.rs0.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs0.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
assert.neq({},
- st.rs1.getPrimary().adminCommand({
- getShardVersion: collSharded.toString(),
- fullMetadata: true
- }).metadata);
+ st.rs1.getPrimary()
+ .adminCommand({getShardVersion: collSharded.toString(), fullMetadata: true})
+ .metadata);
st.stop();
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 57bae9dc390..5cf8dcfd901 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -54,10 +54,9 @@
z = db.data.find().sort({'sub.num': 1}).toArray();
}, 200);
assert.eq(100, z.length, "C1");
- b = 1.5 *
- Date.timeFunc(function() {
- z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
- }, 200);
+ b = 1.5 * Date.timeFunc(function() {
+ z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+ }, 200);
assert.eq(67, z.length, "C2");
print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index 5a8fe060c67..0468fce757b 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -38,10 +38,7 @@
tests.forEach(function(test) {
var collName = "split_large_key_" + test.name;
var midKey = {};
- var chunkKeys = {
- min: {},
- max: {}
- };
+ var chunkKeys = {min: {}, max: {}};
for (var k in test.key) {
// new Array with join creates string length 1 less than size, so add 1
midKey[k] = new Array(test.keyFieldSize + 1).join('a');
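
The comment above describes the classic pre-ES6 idiom: new Array(n + 1).join('a') yields a string of exactly n characters, because join places the separator between the n + 1 empty slots:

    var n = 5;
    var s = new Array(n + 1).join('a');  // 6 empty slots -> 5 separators
    assert.eq('aaaaa', s);
    assert.eq(n, s.length);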
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index c66d2f145eb..35e25b5803e 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -40,13 +40,15 @@ jsTest.log("Get split points of the chunk using force : true...");
var maxChunkSizeBytes = 1024 * 1024;
-var splitKeys = shardAdmin.runCommand({
- splitVector: coll + "",
- keyPattern: {_id: 1},
- min: {_id: 0},
- max: {_id: MaxKey},
- force: true
-}).splitKeys;
+var splitKeys = shardAdmin
+ .runCommand({
+ splitVector: coll + "",
+ keyPattern: {_id: 1},
+ min: {_id: 0},
+ max: {_id: MaxKey},
+ force: true
+ })
+ .splitKeys;
printjson(splitKeys);
printjson(coll.stats());
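
With force: true, splitVector returns a split point at the middle of the requested range even when the chunk is below the size threshold; without it, the command instead computes as many keys as needed to respect a size bound. A sketch of the non-forced form (maxChunkSizeBytes reused from above):

    var res = shardAdmin.runCommand({
        splitVector: coll + "",
        keyPattern: {_id: 1},
        min: {_id: 0},
        max: {_id: MaxKey},
        maxChunkSizeBytes: maxChunkSizeBytes  // size-based split points
    });
    printjson(res.splitKeys);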
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 791120f6f82..d5d03fcb442 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -76,12 +76,8 @@ var makeStaleMongosTargetSingleShard = function() {
};
var checkAllRemoveQueries = function(makeMongosStaleFunc) {
- var multi = {
- justOne: false
- };
- var single = {
- justOne: true
- };
+ var multi = {justOne: false};
+ var single = {justOne: true};
var doRemove = function(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -119,23 +115,12 @@ var checkAllRemoveQueries = function(makeMongosStaleFunc) {
};
var checkAllUpdateQueries = function(makeMongosStaleFunc) {
- var oUpdate = {
- $inc: {fieldToUpdate: 1}
- }; // op-style update (non-idempotent)
- var rUpdate = {
- x: 0,
- fieldToUpdate: 1
- }; // replacement-style update (idempotent)
- var queryAfterUpdate = {
- fieldToUpdate: 1
- };
+ var oUpdate = {$inc: {fieldToUpdate: 1}}; // op-style update (non-idempotent)
+ var rUpdate = {x: 0, fieldToUpdate: 1}; // replacement-style update (idempotent)
+ var queryAfterUpdate = {fieldToUpdate: 1};
- var multi = {
- multi: true
- };
- var single = {
- multi: false
- };
+ var multi = {multi: true};
+ var single = {multi: false};
var doUpdate = function(query, update, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -200,20 +185,14 @@ var freshMongos = st.s0;
var staleMongos = st.s1;
var emptyQuery = {};
-var pointQuery = {
- x: 0
-};
+var pointQuery = {x: 0};
// Choose a range that would fall on only one shard.
// Use (splitPoint - 1) because of SERVER-20768.
-var rangeQuery = {
- x: {$gte: 0, $lt: splitPoint - 1}
-};
+var rangeQuery = {x: {$gte: 0, $lt: splitPoint - 1}};
// Choose points that would fall on two different shards.
-var multiPointQuery = {
- $or: [{x: 0}, {x: numShardKeys}]
-};
+var multiPointQuery = {$or: [{x: 0}, {x: numShardKeys}]};
checkAllRemoveQueries(makeStaleMongosTargetSingleShard);
checkAllRemoveQueries(makeStaleMongosTargetMultipleShards);
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index da6d842fb99..36ce75b520e 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -192,9 +192,7 @@
}
// indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {
- a: 1
- };
+ var indexKey = {a: 1};
var indexName = getIndexName(indexKey);
checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 74a3e942cae..b4bf2c96e60 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -72,10 +72,7 @@ function runTest(test) {
// Insert one doc at a time until first auto-split occurs on top chunk
var xval = test.inserts.value;
do {
- var doc = {
- x: xval,
- val: largeStr
- };
+ var doc = {x: xval, val: largeStr};
coll.insert(doc);
xval += test.inserts.inc;
} while (getNumberOfChunks(configDB) <= numChunks);
@@ -108,44 +105,17 @@ var configDB = st.s.getDB('config');
// Define shard key ranges for each of the shard nodes
var MINVAL = -500;
var MAXVAL = 1500;
-var lowChunkRange = {
- min: MINVAL,
- max: 0
-};
-var midChunkRange1 = {
- min: 0,
- max: 500
-};
-var midChunkRange2 = {
- min: 500,
- max: 1000
-};
-var highChunkRange = {
- min: 1000,
- max: MAXVAL
-};
-
-var lowChunkTagRange = {
- min: MinKey,
- max: 0
-};
-var highChunkTagRange = {
- min: 1000,
- max: MaxKey
-};
-
-var lowChunkInserts = {
- value: 0,
- inc: -1
-};
-var midChunkInserts = {
- value: 1,
- inc: 1
-};
-var highChunkInserts = {
- value: 1000,
- inc: 1
-};
+var lowChunkRange = {min: MINVAL, max: 0};
+var midChunkRange1 = {min: 0, max: 500};
+var midChunkRange2 = {min: 500, max: 1000};
+var highChunkRange = {min: 1000, max: MAXVAL};
+
+var lowChunkTagRange = {min: MinKey, max: 0};
+var highChunkTagRange = {min: 1000, max: MaxKey};
+
+var lowChunkInserts = {value: 0, inc: -1};
+var midChunkInserts = {value: 1, inc: 1};
+var highChunkInserts = {value: 1000, inc: 1};
var lowChunk = 1;
var highChunk = -1;