author     Jonathan Abrahams <jonathan@mongodb.com>   2016-03-09 12:17:50 -0500
committer  Jonathan Abrahams <jonathan@mongodb.com>   2016-03-09 12:18:14 -0500
commit     4ae691e8edc87d0e3cfb633bb91c328426be007b (patch)
tree       52079a593f54382ca13a2e741633eab1b6271893 /jstests/sharding
parent     a025d43f3ce2efc1fb1282a718f5d286fa0a4dc1 (diff)
download   mongo-4ae691e8edc87d0e3cfb633bb91c328426be007b.tar.gz
SERVER-22468 Format JS code with approved style in jstests/
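
The diff below is the mechanical result of re-running the approved formatter over the tests in jstests/sharding. As a rough illustration only (the exact tooling behind SERVER-22468 is not shown in this commit), a small Python sketch that re-applies a clang-format style file to every .js test might look like the following; the helper name and the assumption that clang-format (which also understands JavaScript) is the approved formatter are this note's own, while -i and -style=file are standard clang-format options:

    #!/usr/bin/env python
    # Illustrative sketch only: walk jstests/ and rewrite each .js file in place
    # with clang-format. Assumes a clang-format binary on PATH and a .clang-format
    # style file at the repository root; the real MongoDB tooling is not shown here.
    import os
    import subprocess

    def format_js_tests(root="jstests"):
        for dirpath, _, filenames in os.walk(root):
            for name in filenames:
                if name.endswith(".js"):
                    path = os.path.join(dirpath, name)
                    # -i edits the file in place; -style=file reads the repo's .clang-format.
                    subprocess.check_call(["clang-format", "-i", "-style=file", path])

    if __name__ == "__main__":
        format_js_tests()
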
Diffstat (limited to 'jstests/sharding')
-rw-r--r--  jstests/sharding/SERVER-7379.js | 32
-rw-r--r--  jstests/sharding/add_invalid_shard.js | 58
-rw-r--r--  jstests/sharding/addshard1.js | 108
-rw-r--r--  jstests/sharding/addshard2.js | 250
-rw-r--r--  jstests/sharding/addshard4.js | 77
-rw-r--r--  jstests/sharding/addshard5.js | 73
-rw-r--r--  jstests/sharding/all_config_hosts_down.js | 63
-rw-r--r--  jstests/sharding/all_config_servers_blackholed_from_mongos.js | 57
-rw-r--r--  jstests/sharding/array_shard_key.js | 99
-rw-r--r--  jstests/sharding/auth.js | 606
-rw-r--r--  jstests/sharding/auth2.js | 19
-rw-r--r--  jstests/sharding/authCommands.js | 576
-rw-r--r--  jstests/sharding/authConnectionHook.js | 44
-rw-r--r--  jstests/sharding/auth_add_shard.js | 138
-rw-r--r--  jstests/sharding/auth_copydb.js | 57
-rw-r--r--  jstests/sharding/auth_repl.js | 11
-rw-r--r--  jstests/sharding/auth_slaveok_routing.js | 68
-rw-r--r--  jstests/sharding/authmr.js | 219
-rw-r--r--  jstests/sharding/authwhere.js | 135
-rw-r--r--  jstests/sharding/auto1.js | 113
-rw-r--r--  jstests/sharding/auto2.js | 255
-rw-r--r--  jstests/sharding/auto_rebalance.js | 92
-rw-r--r--  jstests/sharding/autodiscover_config_rs_from_secondary.js | 72
-rw-r--r--  jstests/sharding/autosplit_heuristics.js | 110
-rw-r--r--  jstests/sharding/balance_repl.js | 110
-rw-r--r--  jstests/sharding/balance_tags1.js | 48
-rw-r--r--  jstests/sharding/balance_tags2.js | 46
-rw-r--r--  jstests/sharding/basic_drop_coll.js | 61
-rw-r--r--  jstests/sharding/basic_sharding_params.js | 45
-rw-r--r--  jstests/sharding/basic_split.js | 127
-rw-r--r--  jstests/sharding/batch_write_command_sharded.js | 476
-rw-r--r--  jstests/sharding/bouncing_count.js | 75
-rw-r--r--  jstests/sharding/bulk_insert.js | 490
-rw-r--r--  jstests/sharding/bulk_shard_insert.js | 118
-rw-r--r--  jstests/sharding/cleanup_orphaned.js | 4
-rw-r--r--  jstests/sharding/cleanup_orphaned_auth.js | 79
-rw-r--r--  jstests/sharding/cleanup_orphaned_basic.js | 244
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js | 292
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js | 225
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_hashed.js | 131
-rw-r--r--  jstests/sharding/cleanup_orphaned_cmd_prereload.js | 118
-rw-r--r--  jstests/sharding/coll_epoch_test0.js | 50
-rw-r--r--  jstests/sharding/coll_epoch_test1.js | 108
-rw-r--r--  jstests/sharding/coll_epoch_test2.js | 109
-rw-r--r--  jstests/sharding/conf_server_write_concern.js | 66
-rw-r--r--  jstests/sharding/config_rs_change.js | 4
-rw-r--r--  jstests/sharding/config_rs_no_primary.js | 94
-rw-r--r--  jstests/sharding/conn_pool_stats.js | 2
-rw-r--r--  jstests/sharding/copydb_from_mongos.js | 30
-rw-r--r--  jstests/sharding/count1.js | 346
-rw-r--r--  jstests/sharding/count2.js | 76
-rw-r--r--  jstests/sharding/count_config_servers.js | 88
-rw-r--r--  jstests/sharding/count_slaveok.js | 134
-rw-r--r--  jstests/sharding/covered_shard_key_indexes.js | 140
-rw-r--r--  jstests/sharding/create_idx_empty_primary.js | 39
-rw-r--r--  jstests/sharding/cursor1.js | 111
-rw-r--r--  jstests/sharding/cursor_cleanup.js | 24
-rw-r--r--  jstests/sharding/delete_during_migrate.js | 32
-rw-r--r--  jstests/sharding/diffservers1.js | 31
-rw-r--r--  jstests/sharding/disable_autosplit.js | 42
-rw-r--r--  jstests/sharding/drop_configdb.js | 46
-rw-r--r--  jstests/sharding/drop_sharded_db.js | 101
-rw-r--r--  jstests/sharding/dump_coll_metadata.js | 78
-rw-r--r--  jstests/sharding/empty_cluster_init.js | 29
-rw-r--r--  jstests/sharding/empty_doc_results.js | 35
-rw-r--r--  jstests/sharding/enable_sharding_basic.js | 68
-rw-r--r--  jstests/sharding/error_propagation.js | 12
-rw-r--r--  jstests/sharding/exact_shard_key_target.js | 70
-rw-r--r--  jstests/sharding/explain_cmd.js | 67
-rw-r--r--  jstests/sharding/explain_find_and_modify_sharded.js | 41
-rw-r--r--  jstests/sharding/explain_read_pref.js | 79
-rw-r--r--  jstests/sharding/fair_balancer_round.js | 21
-rw-r--r--  jstests/sharding/features1.js | 378
-rw-r--r--  jstests/sharding/features2.js | 348
-rw-r--r--  jstests/sharding/features3.js | 256
-rw-r--r--  jstests/sharding/find_and_modify_after_multi_write.js | 161
-rw-r--r--  jstests/sharding/find_getmore_cmd.js | 14
-rw-r--r--  jstests/sharding/findandmodify1.js | 92
-rw-r--r--  jstests/sharding/findandmodify2.js | 83
-rw-r--r--  jstests/sharding/forget_mr_temp_ns.js | 47
-rw-r--r--  jstests/sharding/fts_score_sort_sharded.js | 46
-rw-r--r--  jstests/sharding/geo_near_random1.js | 76
-rw-r--r--  jstests/sharding/geo_near_random2.js | 95
-rw-r--r--  jstests/sharding/geo_shardedgeonear.js | 38
-rw-r--r--  jstests/sharding/group_slaveok.js | 95
-rw-r--r--  jstests/sharding/hash_basic.js | 56
-rw-r--r--  jstests/sharding/hash_shard1.js | 56
-rw-r--r--  jstests/sharding/hash_shard_non_empty.js | 15
-rw-r--r--  jstests/sharding/hash_shard_num_chunks.js | 51
-rw-r--r--  jstests/sharding/hash_shard_unique_compound.js | 18
-rw-r--r--  jstests/sharding/hash_single_shard.js | 16
-rw-r--r--  jstests/sharding/hash_skey_split.js | 33
-rw-r--r--  jstests/sharding/idhack_sharded.js | 4
-rw-r--r--  jstests/sharding/implicit_db_creation.js | 52
-rw-r--r--  jstests/sharding/in_memory_sort_limit.js | 86
-rw-r--r--  jstests/sharding/index1.js | 734
-rw-r--r--  jstests/sharding/inserts_consistent.js | 64
-rw-r--r--  jstests/sharding/ismaster.js | 37
-rw-r--r--  jstests/sharding/jumbo1.js | 74
-rw-r--r--  jstests/sharding/key_many.js | 361
-rw-r--r--  jstests/sharding/key_string.js | 114
-rw-r--r--  jstests/sharding/lagged_config_secondary.js | 51
-rw-r--r--  jstests/sharding/large_chunk.js | 93
-rw-r--r--  jstests/sharding/large_skip_one_shard.js | 33
-rw-r--r--  jstests/sharding/limit_push.js | 104
-rw-r--r--  jstests/sharding/listDatabases.js | 36
-rw-r--r--  jstests/sharding/listshards.js | 110
-rw-r--r--  jstests/sharding/localhostAuthBypass.js | 175
-rw-r--r--  jstests/sharding/major_version_check.js | 69
-rw-r--r--  jstests/sharding/mapReduce_inSharded.js | 37
-rw-r--r--  jstests/sharding/mapReduce_inSharded_outSharded.js | 38
-rw-r--r--  jstests/sharding/mapReduce_nonSharded.js | 37
-rw-r--r--  jstests/sharding/mapReduce_outSharded.js | 38
-rw-r--r--  jstests/sharding/map_reduce_validation.js | 58
-rw-r--r--  jstests/sharding/max_time_ms_sharded.js | 439
-rw-r--r--  jstests/sharding/merge_chunks_basic.js | 76
-rw-r--r--  jstests/sharding/merge_chunks_test.js | 150
-rw-r--r--  jstests/sharding/merge_chunks_test_with_md_ops.js | 74
-rw-r--r--  jstests/sharding/migrateBig.js | 116
-rw-r--r--  jstests/sharding/migrateBig_balancer.js | 94
-rw-r--r--  jstests/sharding/migrate_overwrite_id.js | 42
-rw-r--r--  jstests/sharding/migration_failure.js | 74
-rw-r--r--  jstests/sharding/migration_ignore_interrupts.js | 620
-rw-r--r--  jstests/sharding/migration_sets_fromMigrate_flag.js | 291
-rw-r--r--  jstests/sharding/migration_with_source_ops.js | 250
-rw-r--r--  jstests/sharding/min_optime_recovery.js | 124
-rw-r--r--  jstests/sharding/missing_key.js | 31
-rw-r--r--  jstests/sharding/mongos_no_detect_sharding.js | 50
-rw-r--r--  jstests/sharding/mongos_no_replica_set_refresh.js | 134
-rw-r--r--  jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js | 270
-rw-r--r--  jstests/sharding/mongos_rs_shard_failure_tolerance.js | 728
-rw-r--r--  jstests/sharding/mongos_shard_failure_tolerance.js | 171
-rw-r--r--  jstests/sharding/mongos_validate_backoff.js | 86
-rw-r--r--  jstests/sharding/mongos_validate_writes.js | 80
-rw-r--r--  jstests/sharding/movePrimary1.js | 98
-rw-r--r--  jstests/sharding/move_chunk_basic.js | 119
-rw-r--r--  jstests/sharding/move_chunk_missing_idx.js | 33
-rw-r--r--  jstests/sharding/move_primary_basic.js | 74
-rw-r--r--  jstests/sharding/move_stale_mongos.js | 5
-rw-r--r--  jstests/sharding/movechunk_include.js | 34
-rw-r--r--  jstests/sharding/movechunk_with_default_paranoia.js | 9
-rw-r--r--  jstests/sharding/movechunk_with_moveParanoia.js | 14
-rw-r--r--  jstests/sharding/movechunk_with_noMoveParanoia.js | 14
-rw-r--r--  jstests/sharding/moveprimary_ignore_sharded.js | 97
-rw-r--r--  jstests/sharding/mrShardedOutput.js | 91
-rw-r--r--  jstests/sharding/mrShardedOutputAuth.js | 155
-rw-r--r--  jstests/sharding/mr_and_agg_versioning.js | 93
-rw-r--r--  jstests/sharding/mr_noscripting.js | 24
-rw-r--r--  jstests/sharding/mr_shard_version.js | 151
-rw-r--r--  jstests/sharding/multi_coll_drop.js | 54
-rw-r--r--  jstests/sharding/multi_mongos2.js | 97
-rw-r--r--  jstests/sharding/multi_mongos2a.js | 43
-rw-r--r--  jstests/sharding/multi_write_target.js | 98
-rw-r--r--  jstests/sharding/names.js | 70
-rw-r--r--  jstests/sharding/noUpdateButN1inAnotherCollection.js | 44
-rw-r--r--  jstests/sharding/no_empty_reset.js | 59
-rw-r--r--  jstests/sharding/parallel.js | 94
-rw-r--r--  jstests/sharding/pending_chunk.js | 155
-rw-r--r--  jstests/sharding/prefix_shard_key.js | 201
-rw-r--r--  jstests/sharding/presplit.js | 75
-rw-r--r--  jstests/sharding/printShardingStatus.js | 422
-rw-r--r--  jstests/sharding/query_after_multi_write.js | 127
-rw-r--r--  jstests/sharding/query_config.js | 168
-rw-r--r--  jstests/sharding/query_sharded.js | 20
-rw-r--r--  jstests/sharding/read_after_optime.js | 25
-rw-r--r--  jstests/sharding/read_does_not_create_namespaces.js | 14
-rw-r--r-- [-rwxr-xr-x]  jstests/sharding/read_pref.js | 108
-rw-r--r--  jstests/sharding/read_pref_cmd.js | 181
-rw-r--r--  jstests/sharding/read_pref_multi_mongos_stale_config.js | 29
-rw-r--r--  jstests/sharding/recovering_slaveok.js | 168
-rw-r--r--  jstests/sharding/regex_targeting.js | 294
-rw-r--r--  jstests/sharding/remove1.js | 44
-rw-r--r--  jstests/sharding/remove2.js | 177
-rw-r--r--  jstests/sharding/remove3.js | 86
-rw-r--r--  jstests/sharding/rename.js | 83
-rw-r--r--  jstests/sharding/rename_across_mongos.js | 38
-rw-r--r--  jstests/sharding/repl_monitor_refresh.js | 112
-rw-r--r--  jstests/sharding/replmonitor_bad_seed.js | 21
-rw-r--r--  jstests/sharding/return_partial_shards_down.js | 35
-rw-r--r--  jstests/sharding/rs_stepdown_and_pooling.js | 207
-rw-r--r--  jstests/sharding/secondary_query_routing.js | 48
-rw-r--r--  jstests/sharding/server_status.js | 87
-rw-r--r--  jstests/sharding/shard1.js | 59
-rw-r--r--  jstests/sharding/shard2.js | 282
-rw-r--r--  jstests/sharding/shard3.js | 372
-rw-r--r--  jstests/sharding/shard4.js | 78
-rw-r--r--  jstests/sharding/shard5.js | 78
-rw-r--r--  jstests/sharding/shard6.js | 114
-rw-r--r--  jstests/sharding/shard7.js | 75
-rw-r--r--  jstests/sharding/shard_collection_basic.js | 228
-rw-r--r--  jstests/sharding/shard_existing.js | 56
-rw-r--r--  jstests/sharding/shard_insert_getlasterror_w2.js | 55
-rw-r--r--  jstests/sharding/shard_key_immutable.js | 549
-rw-r--r--  jstests/sharding/shard_keycount.js | 60
-rw-r--r--  jstests/sharding/shard_kill_and_pooling.js | 162
-rw-r--r--  jstests/sharding/shard_targeting.js | 88
-rw-r--r--  jstests/sharding/shard_with_special_db_names.js | 40
-rw-r--r--  jstests/sharding/sharded_limit_batchsize.js | 219
-rw-r--r--  jstests/sharding/sharded_profile.js | 36
-rw-r--r--  jstests/sharding/sharding_balance1.js | 125
-rw-r--r--  jstests/sharding/sharding_balance2.js | 101
-rw-r--r--  jstests/sharding/sharding_balance3.js | 127
-rw-r--r--  jstests/sharding/sharding_balance4.js | 244
-rw-r--r--  jstests/sharding/sharding_migrate_cursor1.js | 114
-rw-r--r--  jstests/sharding/sharding_multiple_ns_rs.js | 74
-rw-r--r--  jstests/sharding/sharding_options.js | 113
-rw-r--r--  jstests/sharding/sharding_rs1.js | 81
-rw-r--r--  jstests/sharding/sharding_rs2.js | 377
-rw-r--r--  jstests/sharding/sharding_state_after_stepdown.js | 323
-rw-r--r--  jstests/sharding/sharding_system_namespaces.js | 39
-rw-r--r--  jstests/sharding/sort1.js | 190
-rw-r--r--  jstests/sharding/split_chunk.js | 195
-rw-r--r--  jstests/sharding/split_large_key.js | 107
-rw-r--r--  jstests/sharding/split_with_force.js | 62
-rw-r--r--  jstests/sharding/split_with_force_small.js | 64
-rw-r--r--  jstests/sharding/ssv_config_check.js | 108
-rw-r--r--  jstests/sharding/stale_mongos_updates_and_removes.js | 87
-rw-r--r--  jstests/sharding/stale_version_write.js | 30
-rw-r--r--  jstests/sharding/startup_with_all_configs_down.js | 103
-rw-r--r--  jstests/sharding/stats.js | 357
-rw-r--r--  jstests/sharding/tag_auto_split.js | 86
-rw-r--r--  jstests/sharding/tag_range.js | 68
-rw-r--r--  jstests/sharding/test_stacked_migration_cleanup.js | 95
-rw-r--r--  jstests/sharding/top_chunk_autosplit.js | 314
-rw-r--r--  jstests/sharding/trace_missing_docs_test.js | 60
-rw-r--r--  jstests/sharding/unowned_doc_filtering.js | 67
-rw-r--r--  jstests/sharding/update_immutable_fields.js | 102
-rw-r--r--  jstests/sharding/update_sharded.js | 192
-rw-r--r--  jstests/sharding/upsert_sharded.js | 195
-rw-r--r--  jstests/sharding/user_flags_sharded.js | 97
-rw-r--r--  jstests/sharding/version1.js | 162
-rw-r--r--  jstests/sharding/version2.js | 91
-rw-r--r--  jstests/sharding/write_cmd_auto_split.js | 209
-rw-r--r--  jstests/sharding/write_commands_sharding_state.js | 109
-rw-r--r--  jstests/sharding/zbigMapReduce.js | 121
-rw-r--r--  jstests/sharding/zero_shard_version.js | 333
236 files changed, 14476 insertions, 14365 deletions
diff --git a/jstests/sharding/SERVER-7379.js b/jstests/sharding/SERVER-7379.js
index c637f10c6b4..bdf311cbf6e 100644
--- a/jstests/sharding/SERVER-7379.js
+++ b/jstests/sharding/SERVER-7379.js
@@ -1,46 +1,48 @@
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
-st.adminCommand({ enablesharding: "test" });
+st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
-st.adminCommand({ shardcollection: "test.offerChange", key: { "categoryId": 1, "store": 1, "_id": 1 } });
+st.adminCommand(
+ {shardcollection: "test.offerChange", key: {"categoryId": 1, "store": 1, "_id": 1}});
var db = st.s.getDB('test');
var offerChange = db.getCollection('offerChange');
-var testDoc = { "_id": 123, "categoryId": 9881, "store": "NEW" };
+var testDoc = {
+ "_id": 123,
+ "categoryId": 9881,
+ "store": "NEW"
+};
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123 }, { $set: { store: "NEWEST" } }, true, false));
+assert.writeError(offerChange.update({_id: 123}, {$set: {store: "NEWEST"}}, true, false));
var doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123 },
- { _id: 123, categoryId: 9881, store: "NEWEST" },
- true, false));
+assert.writeError(
+ offerChange.update({_id: 123}, {_id: 123, categoryId: 9881, store: "NEWEST"}, true, false));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.save({ "_id": 123, "categoryId": 9881, "store": "NEWEST" }));
+assert.writeError(offerChange.save({"_id": 123, "categoryId": 9881, "store": "NEWEST"}));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123, store: "NEW" },
- { _id: 123, categoryId: 9881, store: "NEWEST" },
- true, false));
+assert.writeError(offerChange.update(
+ {_id: 123, store: "NEW"}, {_id: 123, categoryId: 9881, store: "NEWEST"}, true, false));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
offerChange.remove({}, false);
offerChange.insert(testDoc);
-assert.writeError(offerChange.update({ _id: 123, categoryId: 9881 },
- { _id: 123, categoryId: 9881, store: "NEWEST" },
- true, false));
+assert.writeError(offerChange.update(
+ {_id: 123, categoryId: 9881}, {_id: 123, categoryId: 9881, store: "NEWEST"}, true, false));
doc = offerChange.findOne();
assert(friendlyEqual(doc, testDoc), 'doc changed: ' + tojson(doc));
diff --git a/jstests/sharding/add_invalid_shard.js b/jstests/sharding/add_invalid_shard.js
index 7dfa6d0f819..357cf252356 100644
--- a/jstests/sharding/add_invalid_shard.js
+++ b/jstests/sharding/add_invalid_shard.js
@@ -3,47 +3,47 @@
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 1 });
+ var st = new ShardingTest({shards: 1});
-var configDB = st.s.getDB('config');
-var shardDoc = configDB.shards.findOne();
+ var configDB = st.s.getDB('config');
+ var shardDoc = configDB.shards.findOne();
-// Can't add mongos as shard.
-assert.commandFailed(st.admin.runCommand({ addshard: st.s.host }));
+ // Can't add mongos as shard.
+ assert.commandFailed(st.admin.runCommand({addshard: st.s.host}));
-// Can't add config servers as shard.
-assert.commandFailed(st.admin.runCommand({ addshard: st._configDB }));
+ // Can't add config servers as shard.
+ assert.commandFailed(st.admin.runCommand({addshard: st._configDB}));
-var replTest = new ReplSetTest({ nodes: 2 });
-replTest.startSet({ oplogSize: 10 });
-replTest.initiate();
+ var replTest = new ReplSetTest({nodes: 2});
+ replTest.startSet({oplogSize: 10});
+ replTest.initiate();
-var rsConnStr = replTest.getURL();
-// Can't add replSet as shard if the name doesn't match the replSet config.
-assert.commandFailed(st.admin.runCommand({ addshard: "prefix_" + rsConnStr }));
+ var rsConnStr = replTest.getURL();
+ // Can't add replSet as shard if the name doesn't match the replSet config.
+ assert.commandFailed(st.admin.runCommand({addshard: "prefix_" + rsConnStr}));
-assert.commandWorked(st.admin.runCommand({ addshard: rsConnStr, name: 'dummyRS' }));
+ assert.commandWorked(st.admin.runCommand({addshard: rsConnStr, name: 'dummyRS'}));
-// Cannot add the same replSet shard host twice.
-assert.commandFailed(st.admin.runCommand({ addshard: rsConnStr }));
+ // Cannot add the same replSet shard host twice.
+ assert.commandFailed(st.admin.runCommand({addshard: rsConnStr}));
-// Cannot add the same replSet shard host twice even when using a unique shard name.
-assert.commandFailed(st.admin.runCommand({ addshard: rsConnStr, name: 'dupRS' }));
+ // Cannot add the same replSet shard host twice even when using a unique shard name.
+ assert.commandFailed(st.admin.runCommand({addshard: rsConnStr, name: 'dupRS'}));
-// Cannot add the same replSet shard host twice even when using an valid variant of the replSet
-// connection string.
-var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
-assert.commandFailed(st.admin.runCommand({ addshard: truncatedRSConnStr, name: 'dupRS' }));
+ // Cannot add the same replSet shard host twice even when using an valid variant of the replSet
+ // connection string.
+ var truncatedRSConnStr = rsConnStr.substring(0, rsConnStr.indexOf(','));
+ assert.commandFailed(st.admin.runCommand({addshard: truncatedRSConnStr, name: 'dupRS'}));
-// Cannot add the same stand alone shard host twice.
-assert.commandFailed(st.admin.runCommand({ addshard: shardDoc.host }));
+ // Cannot add the same stand alone shard host twice.
+ assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host}));
-// Cannot add the same stand alone shard host twice even with a unique shard name.
-assert.commandFailed(st.admin.runCommand({ addshard: shardDoc.host, name: 'dupShard' }));
+ // Cannot add the same stand alone shard host twice even with a unique shard name.
+ assert.commandFailed(st.admin.runCommand({addshard: shardDoc.host, name: 'dupShard'}));
-replTest.stopSet();
-st.stop();
+ replTest.stopSet();
+ st.stop();
})();
diff --git a/jstests/sharding/addshard1.js b/jstests/sharding/addshard1.js
index db8818b1e0f..1bea66e21c6 100644
--- a/jstests/sharding/addshard1.js
+++ b/jstests/sharding/addshard1.js
@@ -1,72 +1,80 @@
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false });
+ var s = new ShardingTest({name: "add_shard1", shards: 1, useHostname: false});
-// Create a shard and add a database; if the database is not duplicated the mongod should accept
-// it as shard
-var conn1 = MongoRunner.runMongod({});
-var db1 = conn1.getDB("testDB");
+ // Create a shard and add a database; if the database is not duplicated the mongod should accept
+ // it as shard
+ var conn1 = MongoRunner.runMongod({});
+ var db1 = conn1.getDB("testDB");
-var numObjs = 3;
-for (var i = 0; i < numObjs; i++){
- assert.writeOK(db1.foo.save({ a : i }));
-}
+ var numObjs = 3;
+ for (var i = 0; i < numObjs; i++) {
+ assert.writeOK(db1.foo.save({a: i}));
+ }
-var configDB = s.s.getDB('config');
-assert.eq(null, configDB.databases.findOne({ _id: 'testDB' }));
+ var configDB = s.s.getDB('config');
+ assert.eq(null, configDB.databases.findOne({_id: 'testDB'}));
-var newShard = "myShard";
-assert.commandWorked(s.admin.runCommand({ addshard: "localhost:" + conn1.port,
- name: newShard,
- maxSize: 1024 }));
+ var newShard = "myShard";
+ assert.commandWorked(
+ s.admin.runCommand({addshard: "localhost:" + conn1.port, name: newShard, maxSize: 1024}));
-assert.neq(null, configDB.databases.findOne({ _id: 'testDB' }));
+ assert.neq(null, configDB.databases.findOne({_id: 'testDB'}));
-var newShardDoc = configDB.shards.findOne({ _id: newShard });
-assert.eq(1024, newShardDoc.maxSize);
+ var newShardDoc = configDB.shards.findOne({_id: newShard});
+ assert.eq(1024, newShardDoc.maxSize);
-// a mongod with an existing database name should not be allowed to become a shard
-var conn2 = MongoRunner.runMongod({});
+ // a mongod with an existing database name should not be allowed to become a shard
+ var conn2 = MongoRunner.runMongod({});
-var db2 = conn2.getDB("otherDB");
-assert.writeOK(db2.foo.save({ a: 1 }));
+ var db2 = conn2.getDB("otherDB");
+ assert.writeOK(db2.foo.save({a: 1}));
-var db3 = conn2.getDB("testDB");
-assert.writeOK(db3.foo.save({ a: 1 }));
+ var db3 = conn2.getDB("testDB");
+ assert.writeOK(db3.foo.save({a: 1}));
-s.config.databases.find().forEach(printjson);
+ s.config.databases.find().forEach(printjson);
-var rejectedShard = "rejectedShard";
-assert(!s.admin.runCommand({ addshard: "localhost:" + conn2.port, name : rejectedShard }).ok,
- "accepted mongod with duplicate db");
+ var rejectedShard = "rejectedShard";
+ assert(!s.admin.runCommand({addshard: "localhost:" + conn2.port, name: rejectedShard}).ok,
+ "accepted mongod with duplicate db");
-// Check that all collection that were local to the mongod's are accessible through the mongos
-var sdb1 = s.getDB("testDB");
-assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
+ // Check that all collection that were local to the mongod's are accessible through the mongos
+ var sdb1 = s.getDB("testDB");
+ assert.eq(numObjs, sdb1.foo.count(), "wrong count for database that existed before addshard");
-var sdb2 = s.getDB("otherDB");
-assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
+ var sdb2 = s.getDB("otherDB");
+ assert.eq(0, sdb2.foo.count(), "database of rejected shard appears through mongos");
-// make sure we can move a DB from the original mongod to a previoulsy existing shard
-assert.eq(s.normalize(s.config.databases.findOne({ _id : "testDB" }).primary), newShard, "DB primary is wrong");
+ // make sure we can move a DB from the original mongod to a previoulsy existing shard
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ newShard,
+ "DB primary is wrong");
-var origShard = s.getNonPrimaries("testDB")[0];
-s.adminCommand({ moveprimary : "testDB", to : origShard });
-assert.eq(s.normalize(s.config.databases.findOne({ _id : "testDB" }).primary), origShard, "DB primary didn't move");
-assert.eq(numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
+ var origShard = s.getNonPrimaries("testDB")[0];
+ s.adminCommand({moveprimary: "testDB", to: origShard});
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "testDB"}).primary),
+ origShard,
+ "DB primary didn't move");
+ assert.eq(
+ numObjs, sdb1.foo.count(), "wrong count after moving datbase that existed before addshard");
-// make sure we can shard the original collections
-sdb1.foo.ensureIndex({ a : 1 }, { unique : true }); // can't shard populated collection without an index
-s.adminCommand({ enablesharding : "testDB" });
-s.adminCommand({ shardcollection : "testDB.foo", key: { a : 1 } });
-s.adminCommand({ split : "testDB.foo", middle: { a : Math.floor(numObjs/2) } });
-assert.eq(2, s.config.chunks.count(), "wrong chunk number after splitting collection that existed before");
-assert.eq(numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
+ // make sure we can shard the original collections
+ sdb1.foo.ensureIndex({a: 1},
+ {unique: true}); // can't shard populated collection without an index
+ s.adminCommand({enablesharding: "testDB"});
+ s.adminCommand({shardcollection: "testDB.foo", key: {a: 1}});
+ s.adminCommand({split: "testDB.foo", middle: {a: Math.floor(numObjs / 2)}});
+ assert.eq(2,
+ s.config.chunks.count(),
+ "wrong chunk number after splitting collection that existed before");
+ assert.eq(
+ numObjs, sdb1.foo.count(), "wrong count after splitting collection that existed before");
-MongoRunner.stopMongod(conn1);
-MongoRunner.stopMongod(conn2);
+ MongoRunner.stopMongod(conn1);
+ MongoRunner.stopMongod(conn2);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js
index 2bd57cf1da4..7af23a4ab5b 100644
--- a/jstests/sharding/addshard2.js
+++ b/jstests/sharding/addshard2.js
@@ -1,129 +1,129 @@
(function() {
-// Don't start any shards, yet
-var s = new ShardingTest({name: "add_shard2",
- shards: 1,
- mongos: 1,
- other: {useHostname : true} });
-
-// Start two new instances, which will be used for shards
-var conn1 = MongoRunner.runMongod({useHostname: true});
-var conn2 = MongoRunner.runMongod({useHostname: true});
-
-var rs1 = new ReplSetTest( { "name" : "add_shard2_rs1", nodes : 3 } );
-rs1.startSet();
-rs1.initiate();
-var master1 = rs1.getPrimary();
-
-var rs2 = new ReplSetTest( { "name" : "add_shard2_rs2", nodes : 3 } );
-rs2.startSet();
-rs2.initiate();
-var master2 = rs2.getPrimary();
-
-// replica set with set name = 'config'
-var rs3 = new ReplSetTest({ 'name': 'config', nodes: 3 });
-rs3.startSet();
-rs3.initiate();
-
-// replica set with set name = 'admin'
-var rs4 = new ReplSetTest({ 'name': 'admin', nodes: 3 });
-rs4.startSet();
-rs4.initiate();
-
-// replica set with configsvr: true should *not* be allowed to be added as a shard
-var rs5 = new ReplSetTest({name: 'csrs',
- nodes: 3,
- nodeOptions: {configsvr: "",
- journal: "",
- storageEngine: "wiredTiger"}});
-rs5.startSet();
-var conf = rs5.getReplSetConfig();
-conf.configsvr = true;
-rs5.initiate(conf);
-
-
-// step 1. name given. maxSize zero means no limit. Make sure it is allowed.
-assert.commandWorked(s.admin.runCommand({ addshard: getHostName() + ":" + conn1.port,
- name: "bar",
- maxSize: 0 }));
-var shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000"]}});
-assert(shard, "shard wasn't found");
-assert.eq("bar", shard._id, "shard has incorrect name");
-
-// step 2. replica set
-assert(s.admin.runCommand(
- {"addshard" : "add_shard2_rs1/" + getHostName() + ":" + master1.port}).ok,
- "failed to add shard in step 2");
-shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar"]}});
-assert(shard, "shard wasn't found");
-assert.eq("add_shard2_rs1", shard._id, "t2 name");
-
-// step 3. replica set w/ name given
-assert(s.admin.runCommand({"addshard" : "add_shard2_rs2/" + getHostName() + ":" + master2.port,
- "name" : "myshard"}).ok,
- "failed to add shard in step 4");
-shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1"]}});
-assert(shard, "shard wasn't found");
-assert.eq("myshard", shard._id, "t3 name");
-
-// step 4. no name given
-assert(s.admin.runCommand({"addshard" : getHostName()+":" + conn2.port}).ok,
- "failed to add shard in step 4");
-shard = s.getDB("config").shards.findOne({"_id" : {"$nin" : ["shard0000", "bar", "add_shard2_rs1", "myshard"]}});
-assert(shard, "shard wasn't found");
-assert.eq("shard0001", shard._id, "t4 name");
-
-assert.eq(s.getDB("config").shards.count(), 5, "unexpected number of shards");
-
-// step 5. replica set w/ a wrong host
-var portWithoutHostRunning = allocatePort();
-assert(!s.admin.runCommand({
- addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning
- }).ok,
- "accepted bad hostname in step 5");
-
-// step 6. replica set w/ mixed wrong/right hosts
-assert(!s.admin.runCommand({
- addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port +
- ",foo:" + portWithoutHostRunning
- }).ok,
- "accepted bad hostname in step 6");
-
-// Cannot add invalid stand alone host.
-assert.commandFailed(s.admin.runCommand({ addshard: 'dummy:12345' }));
-
-//
-// SERVER-17231 Adding replica set w/ set name = 'config'
-//
-var configReplURI = 'config/' + getHostName() + ':' + rs3.getPrimary().port;
-
-assert(!s.admin.runCommand({ 'addshard': configReplURI }).ok,
- 'accepted replica set shard with set name "config"');
-// but we should be allowed to add that replica set using a different shard name
-assert(s.admin.runCommand({ 'addshard': configReplURI, name: 'not_config' }).ok,
- 'unable to add replica set using valid replica set name');
-
-shard = s.getDB('config').shards.findOne({ '_id': 'not_config' });
-assert(shard, 'shard with name "not_config" not found');
-
-//
-// SERVER-17232 Try inserting into shard with name 'admin'
-//
-assert(s.admin.runCommand({ 'addshard': 'admin/' + getHostName() + ':' + rs4.getPrimary().port}).ok,
- 'adding replica set with name "admin" should work');
-var wRes = s.getDB('test').foo.insert({ x: 1 });
-assert(!wRes.hasWriteError() && wRes.nInserted === 1,
- 'failed to insert document into "test.foo" unsharded collection');
-
-// SERVER-19545 Should not be able to add config server replsets as shards.
-assert.commandFailed(s.admin.runCommand({addshard: rs5.getURL()}));
-
-s.stop();
-
-rs1.stopSet();
-rs2.stopSet();
-rs3.stopSet();
-rs4.stopSet();
-rs5.stopSet();
+ // Don't start any shards, yet
+ var s =
+ new ShardingTest({name: "add_shard2", shards: 1, mongos: 1, other: {useHostname: true}});
+
+ // Start two new instances, which will be used for shards
+ var conn1 = MongoRunner.runMongod({useHostname: true});
+ var conn2 = MongoRunner.runMongod({useHostname: true});
+
+ var rs1 = new ReplSetTest({"name": "add_shard2_rs1", nodes: 3});
+ rs1.startSet();
+ rs1.initiate();
+ var master1 = rs1.getPrimary();
+
+ var rs2 = new ReplSetTest({"name": "add_shard2_rs2", nodes: 3});
+ rs2.startSet();
+ rs2.initiate();
+ var master2 = rs2.getPrimary();
+
+ // replica set with set name = 'config'
+ var rs3 = new ReplSetTest({'name': 'config', nodes: 3});
+ rs3.startSet();
+ rs3.initiate();
+
+ // replica set with set name = 'admin'
+ var rs4 = new ReplSetTest({'name': 'admin', nodes: 3});
+ rs4.startSet();
+ rs4.initiate();
+
+ // replica set with configsvr: true should *not* be allowed to be added as a shard
+ var rs5 = new ReplSetTest({
+ name: 'csrs',
+ nodes: 3,
+ nodeOptions: {configsvr: "", journal: "", storageEngine: "wiredTiger"}
+ });
+ rs5.startSet();
+ var conf = rs5.getReplSetConfig();
+ conf.configsvr = true;
+ rs5.initiate(conf);
+
+ // step 1. name given. maxSize zero means no limit. Make sure it is allowed.
+ assert.commandWorked(
+ s.admin.runCommand({addshard: getHostName() + ":" + conn1.port, name: "bar", maxSize: 0}));
+ var shard = s.getDB("config").shards.findOne({"_id": {"$nin": ["shard0000"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("bar", shard._id, "shard has incorrect name");
+
+ // step 2. replica set
+ assert(
+ s.admin.runCommand({"addshard": "add_shard2_rs1/" + getHostName() + ":" + master1.port}).ok,
+ "failed to add shard in step 2");
+ shard = s.getDB("config").shards.findOne({"_id": {"$nin": ["shard0000", "bar"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("add_shard2_rs1", shard._id, "t2 name");
+
+ // step 3. replica set w/ name given
+ assert(s.admin.runCommand({
+ "addshard": "add_shard2_rs2/" + getHostName() + ":" + master2.port,
+ "name": "myshard"
+ }).ok,
+ "failed to add shard in step 4");
+ shard = s.getDB("config")
+ .shards.findOne({"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("myshard", shard._id, "t3 name");
+
+ // step 4. no name given
+ assert(s.admin.runCommand({"addshard": getHostName() + ":" + conn2.port}).ok,
+ "failed to add shard in step 4");
+ shard = s.getDB("config").shards.findOne(
+ {"_id": {"$nin": ["shard0000", "bar", "add_shard2_rs1", "myshard"]}});
+ assert(shard, "shard wasn't found");
+ assert.eq("shard0001", shard._id, "t4 name");
+
+ assert.eq(s.getDB("config").shards.count(), 5, "unexpected number of shards");
+
+ // step 5. replica set w/ a wrong host
+ var portWithoutHostRunning = allocatePort();
+ assert(!s.admin.runCommand(
+ {addshard: "add_shard2_rs2/NonExistingHost:" + portWithoutHostRunning}).ok,
+ "accepted bad hostname in step 5");
+
+ // step 6. replica set w/ mixed wrong/right hosts
+ assert(!s.admin.runCommand({
+ addshard: "add_shard2_rs2/" + getHostName() + ":" + master2.port + ",foo:" +
+ portWithoutHostRunning
+ }).ok,
+ "accepted bad hostname in step 6");
+
+ // Cannot add invalid stand alone host.
+ assert.commandFailed(s.admin.runCommand({addshard: 'dummy:12345'}));
+
+ //
+ // SERVER-17231 Adding replica set w/ set name = 'config'
+ //
+ var configReplURI = 'config/' + getHostName() + ':' + rs3.getPrimary().port;
+
+ assert(!s.admin.runCommand({'addshard': configReplURI}).ok,
+ 'accepted replica set shard with set name "config"');
+ // but we should be allowed to add that replica set using a different shard name
+ assert(s.admin.runCommand({'addshard': configReplURI, name: 'not_config'}).ok,
+ 'unable to add replica set using valid replica set name');
+
+ shard = s.getDB('config').shards.findOne({'_id': 'not_config'});
+ assert(shard, 'shard with name "not_config" not found');
+
+ //
+ // SERVER-17232 Try inserting into shard with name 'admin'
+ //
+ assert(
+ s.admin.runCommand({'addshard': 'admin/' + getHostName() + ':' + rs4.getPrimary().port}).ok,
+ 'adding replica set with name "admin" should work');
+ var wRes = s.getDB('test').foo.insert({x: 1});
+ assert(!wRes.hasWriteError() && wRes.nInserted === 1,
+ 'failed to insert document into "test.foo" unsharded collection');
+
+ // SERVER-19545 Should not be able to add config server replsets as shards.
+ assert.commandFailed(s.admin.runCommand({addshard: rs5.getURL()}));
+
+ s.stop();
+
+ rs1.stopSet();
+ rs2.stopSet();
+ rs3.stopSet();
+ rs4.stopSet();
+ rs5.stopSet();
})();
diff --git a/jstests/sharding/addshard4.js b/jstests/sharding/addshard4.js
index 2a66cbc74fe..de2c8a17c10 100644
--- a/jstests/sharding/addshard4.js
+++ b/jstests/sharding/addshard4.js
@@ -1,60 +1,61 @@
// A replica set's passive nodes should be okay to add as part of a shard config
(function() {
-var s = new ShardingTest({ name: "addshard4",
- shards: 2,
- mongos: 1,
- other: {useHostname : true} });
+ var s = new ShardingTest({name: "addshard4", shards: 2, mongos: 1, other: {useHostname: true}});
-var r = new ReplSetTest({name: "addshard4", nodes: 3});
-r.startSet();
+ var r = new ReplSetTest({name: "addshard4", nodes: 3});
+ r.startSet();
-var config = r.getReplSetConfig();
-config.members[2].priority = 0;
+ var config = r.getReplSetConfig();
+ config.members[2].priority = 0;
-r.initiate(config);
-//Wait for replica set to be fully initialized - could take some time
-//to pre-allocate files on slow systems
-r.awaitReplication();
+ r.initiate(config);
+ // Wait for replica set to be fully initialized - could take some time
+ // to pre-allocate files on slow systems
+ r.awaitReplication();
-var master = r.getPrimary();
+ var master = r.getPrimary();
-var members = config.members.map(function(elem) { return elem.host; });
-var shardName = "addshard4/"+members.join(",");
-var invalidShardName = "addshard4/foobar";
+ var members = config.members.map(function(elem) {
+ return elem.host;
+ });
+ var shardName = "addshard4/" + members.join(",");
+ var invalidShardName = "addshard4/foobar";
-print("adding shard "+shardName);
+ print("adding shard " + shardName);
-// First try adding shard with the correct replica set name but incorrect hostname
-// This will make sure that the metadata for this replica set name is cleaned up
-// so that the set can be added correctly when it has the proper hostnames.
-assert.throws(function() {s.adminCommand({"addshard" : invalidShardName});});
+ // First try adding shard with the correct replica set name but incorrect hostname
+ // This will make sure that the metadata for this replica set name is cleaned up
+ // so that the set can be added correctly when it has the proper hostnames.
+ assert.throws(function() {
+ s.adminCommand({"addshard": invalidShardName});
+ });
-var result = s.adminCommand({"addshard" : shardName});
+ var result = s.adminCommand({"addshard": shardName});
-printjson(result);
-assert.eq(result, true);
+ printjson(result);
+ assert.eq(result, true);
-r = new ReplSetTest({name : "addshard42", nodes : 3});
-r.startSet();
+ r = new ReplSetTest({name: "addshard42", nodes: 3});
+ r.startSet();
-config = r.getReplSetConfig();
-config.members[2].arbiterOnly = true;
+ config = r.getReplSetConfig();
+ config.members[2].arbiterOnly = true;
-r.initiate(config);
-// Wait for replica set to be fully initialized - could take some time
-// to pre-allocate files on slow systems
-r.awaitReplication();
+ r.initiate(config);
+ // Wait for replica set to be fully initialized - could take some time
+ // to pre-allocate files on slow systems
+ r.awaitReplication();
-master = r.getPrimary();
+ master = r.getPrimary();
-print("adding shard addshard42");
+ print("adding shard addshard42");
-result = s.adminCommand({"addshard" : "addshard42/"+config.members[2].host});
+ result = s.adminCommand({"addshard": "addshard42/" + config.members[2].host});
-printjson(result);
-assert.eq(result, true);
+ printjson(result);
+ assert.eq(result, true);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/addshard5.js b/jstests/sharding/addshard5.js
index bf24943972e..c420c90de51 100644
--- a/jstests/sharding/addshard5.js
+++ b/jstests/sharding/addshard5.js
@@ -3,55 +3,56 @@
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 2, mongos: 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s;
-var admin = mongos.getDB('admin');
-var config = mongos.getDB('config');
-var coll = mongos.getCollection('foo.bar');
+ var mongos = st.s;
+ var admin = mongos.getDB('admin');
+ var config = mongos.getDB('config');
+ var coll = mongos.getCollection('foo.bar');
-// Get all the shard info and connections
-var shards = [];
-config.shards.find().sort({ _id: 1 }).forEach(function(doc) {
- shards.push(Object.merge(doc, { conn: new Mongo(doc.host) }));
-});
+ // Get all the shard info and connections
+ var shards = [];
+ config.shards.find().sort({_id: 1}).forEach(function(doc) {
+ shards.push(Object.merge(doc, {conn: new Mongo(doc.host)}));
+ });
-// Shard collection
-assert.commandWorked(mongos.adminCommand({ enableSharding: coll.getDB() + ''}));
+ // Shard collection
+ assert.commandWorked(mongos.adminCommand({enableSharding: coll.getDB() + ''}));
-// Just to be sure what primary we start from
-st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
-assert.commandWorked(mongos.adminCommand({ shardCollection: coll + '', key: { _id: 1 } }));
+ // Just to be sure what primary we start from
+ st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
+ assert.commandWorked(mongos.adminCommand({shardCollection: coll + '', key: {_id: 1}}));
-// Insert one document
-assert.writeOK(coll.insert({ hello: 'world'}));
+ // Insert one document
+ assert.writeOK(coll.insert({hello: 'world'}));
-// Migrate the collection to and from shard1 so shard0 loads the shard1 host
-assert.commandWorked(mongos.adminCommand(
- { moveChunk: coll + '', find: { _id: 0 }, to: shards[1]._id, _waitForDelete: true }));
-assert.commandWorked(mongos.adminCommand(
- { moveChunk: coll + '', find: { _id: 0 }, to: shards[0]._id, _waitForDelete: true }));
+ // Migrate the collection to and from shard1 so shard0 loads the shard1 host
+ assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(mongos.adminCommand(
+ {moveChunk: coll + '', find: {_id: 0}, to: shards[0]._id, _waitForDelete: true}));
-// Drop and re-add shard with the same name but a new host.
-assert.commandWorked(mongos.adminCommand({ removeShard: shards[1]._id }));
-assert.commandWorked(mongos.adminCommand({ removeShard: shards[1]._id }));
+ // Drop and re-add shard with the same name but a new host.
+ assert.commandWorked(mongos.adminCommand({removeShard: shards[1]._id}));
+ assert.commandWorked(mongos.adminCommand({removeShard: shards[1]._id}));
-var shard2 = MongoRunner.runMongod({});
-assert.commandWorked(mongos.adminCommand({ addShard: shard2.host, name: shards[1]._id }));
+ var shard2 = MongoRunner.runMongod({});
+ assert.commandWorked(mongos.adminCommand({addShard: shard2.host, name: shards[1]._id}));
-jsTest.log('Shard was dropped and re-added with same name...');
-st.printShardingStatus();
+ jsTest.log('Shard was dropped and re-added with same name...');
+ st.printShardingStatus();
-shards[0].conn.getDB('admin').runCommand({ setParameter: 1, traceExceptions: true });
-shard2.getDB('admin').runCommand({ setParameter: 1, traceExceptions: true });
+ shards[0].conn.getDB('admin').runCommand({setParameter: 1, traceExceptions: true});
+ shard2.getDB('admin').runCommand({setParameter: 1, traceExceptions: true});
-// Try a migration
-assert.commandWorked(mongos.adminCommand({ moveChunk: coll + '', find: { _id: 0 }, to: shards[1]._id }));
+ // Try a migration
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + '', find: {_id: 0}, to: shards[1]._id}));
-assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
+ assert.eq('world', shard2.getCollection(coll + '').findOne().hello);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/all_config_hosts_down.js b/jstests/sharding/all_config_hosts_down.js
index 5827480dca4..3abd0d14feb 100644
--- a/jstests/sharding/all_config_hosts_down.js
+++ b/jstests/sharding/all_config_hosts_down.js
@@ -3,43 +3,42 @@
// Should fail sanely
//
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards : 1, mongos : 1 });
+ var st = new ShardingTest({shards: 1, mongos: 1});
-var mongos = st.s;
-var coll = mongos.getCollection( "foo.bar" );
+ var mongos = st.s;
+ var coll = mongos.getCollection("foo.bar");
-jsTestLog( "Stopping config servers" );
-for (var i = 0; i < st._configServers.length; i++) {
- MongoRunner.stopMongod(st._configServers[i]);
-}
-
-// Make sure mongos has no database info currently loaded
-mongos.getDB( "admin" ).runCommand({ flushRouterConfig : 1 });
-
-jsTestLog( "Config flushed and config servers down!" );
-
-// Throws transport error first and subsequent times when loading config data, not no primary
-for( var i = 0; i < 2; i++ ){
- try {
- coll.findOne();
- // Should always throw
- assert( false );
+ jsTestLog("Stopping config servers");
+ for (var i = 0; i < st._configServers.length; i++) {
+ MongoRunner.stopMongod(st._configServers[i]);
}
- catch( e ) {
- printjson( e );
-
- // Make sure we get a transport error, and not a no-primary error
- assert(e.code == 8002 || // SCCC config down, for v3.0 compatibility.
- e.code == 10276 || // Transport error
- e.code == 13328 || // Connect error
- e.code == ErrorCodes.HostUnreachable ||
- e.code == ErrorCodes.FailedToSatisfyReadPreference ||
- e.code == ErrorCodes.ReplicaSetNotFound);
+
+ // Make sure mongos has no database info currently loaded
+ mongos.getDB("admin").runCommand({flushRouterConfig: 1});
+
+ jsTestLog("Config flushed and config servers down!");
+
+ // Throws transport error first and subsequent times when loading config data, not no primary
+ for (var i = 0; i < 2; i++) {
+ try {
+ coll.findOne();
+ // Should always throw
+ assert(false);
+ } catch (e) {
+ printjson(e);
+
+ // Make sure we get a transport error, and not a no-primary error
+ assert(e.code == 8002 || // SCCC config down, for v3.0 compatibility.
+ e.code == 10276 || // Transport error
+ e.code == 13328 || // Connect error
+ e.code == ErrorCodes.HostUnreachable ||
+ e.code == ErrorCodes.FailedToSatisfyReadPreference ||
+ e.code == ErrorCodes.ReplicaSetNotFound);
+ }
}
-}
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/all_config_servers_blackholed_from_mongos.js b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
index cf5ec266093..c3ed68e97de 100644
--- a/jstests/sharding/all_config_servers_blackholed_from_mongos.js
+++ b/jstests/sharding/all_config_servers_blackholed_from_mongos.js
@@ -1,44 +1,41 @@
// Ensures that if the config servers are blackholed from the point of view of MongoS, metadata
// operations do not get stuck forever.
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({
- name: 'all_config_servers_blackholed_from_mongos',
- shards: 2,
- mongos: 1,
- useBridge: true,
-});
+ var st = new ShardingTest({
+ name: 'all_config_servers_blackholed_from_mongos',
+ shards: 2,
+ mongos: 1,
+ useBridge: true,
+ });
-var testDB = st.s.getDB('BlackHoleDB');
+ var testDB = st.s.getDB('BlackHoleDB');
-assert.commandWorked(testDB.adminCommand({ enableSharding: 'BlackHoleDB' }));
-assert.commandWorked(testDB.adminCommand({
- shardCollection: testDB.ShardedColl.getFullName(),
- key: { _id: 1 }
-}));
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'BlackHoleDB'}));
+ assert.commandWorked(
+ testDB.adminCommand({shardCollection: testDB.ShardedColl.getFullName(), key: {_id: 1}}));
-assert.writeOK(testDB.ShardedColl.insert({ a: 1 }));
+ assert.writeOK(testDB.ShardedColl.insert({a: 1}));
-jsTest.log('Making all the config servers appear as a blackhole to mongos');
-st._configServers.forEach(function(configSvr) {
- configSvr.discardMessagesFrom(st.s, 1.0);
-});
+ jsTest.log('Making all the config servers appear as a blackhole to mongos');
+ st._configServers.forEach(function(configSvr) {
+ configSvr.discardMessagesFrom(st.s, 1.0);
+ });
-assert.commandWorked(testDB.adminCommand({ flushRouterConfig: 1 }));
+ assert.commandWorked(testDB.adminCommand({flushRouterConfig: 1}));
-// This shouldn't stall
-jsTest.log('Doing read operation on the sharded collection');
-assert.throws(function() {
- testDB.ShardedColl.find({}).itcount();
-});
+ // This shouldn't stall
+ jsTest.log('Doing read operation on the sharded collection');
+ assert.throws(function() {
+ testDB.ShardedColl.find({}).itcount();
+ });
-// This should fail, because the primary is not available
-jsTest.log('Doing write operation on a new database and collection');
-assert.writeError(st.s.getDB('NonExistentDB').TestColl.insert({
- _id: 0,
- value: 'This value will never be inserted' }));
+ // This should fail, because the primary is not available
+ jsTest.log('Doing write operation on a new database and collection');
+ assert.writeError(st.s.getDB('NonExistentDB')
+ .TestColl.insert({_id: 0, value: 'This value will never be inserted'}));
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/array_shard_key.js b/jstests/sharding/array_shard_key.js
index c5d63fcae59..4fd60c3f21d 100644
--- a/jstests/sharding/array_shard_key.js
+++ b/jstests/sharding/array_shard_key.js
@@ -1,114 +1,111 @@
// Ensure you can't shard on an array key
-var st = new ShardingTest({ name : jsTestName(), shards : 3 });
+var st = new ShardingTest({name: jsTestName(), shards: 3});
var mongos = st.s0;
-var coll = mongos.getCollection( jsTestName() + ".foo" );
+var coll = mongos.getCollection(jsTestName() + ".foo");
-st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
-printjson( mongos.getDB("config").chunks.find().toArray() );
+printjson(mongos.getDB("config").chunks.find().toArray());
st.printShardingStatus();
-print( "1: insert some invalid data" );
+print("1: insert some invalid data");
var value = null;
-// Insert an object with invalid array key
-assert.writeError(coll.insert({ i : [ 1, 2 ] }));
+// Insert an object with invalid array key
+assert.writeError(coll.insert({i: [1, 2]}));
// Insert an object with all the right fields, but an invalid array val for _id
-assert.writeError(coll.insert({ _id : [ 1, 2 ] , i : 3}));
+assert.writeError(coll.insert({_id: [1, 2], i: 3}));
// Insert an object with valid array key
-assert.writeOK(coll.insert({ i : 1 }));
+assert.writeOK(coll.insert({i: 1}));
// Update the value with valid other field
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.update( value, { $set : { j : 2 } } ));
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(value, {$set: {j: 2}}));
// Update the value with invalid other fields
-value = coll.findOne({ i : 1 });
-assert.writeError(coll.update( value, Object.merge( value, { i : [ 3 ] } ) ));
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3]})));
// Multi-update the value with invalid other fields
-value = coll.findOne({ i : 1 });
-assert.writeError(coll.update( value, Object.merge( value, { i : [ 3, 4 ] } ), false, true));
+value = coll.findOne({i: 1});
+assert.writeError(coll.update(value, Object.merge(value, {i: [3, 4]}), false, true));
// Multi-update the value with other fields (won't work, but no error)
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.update( Object.merge( value, { i : [ 1, 1 ] } ), { $set : { k : 4 } }, false, true));
+value = coll.findOne({i: 1});
+assert.writeOK(coll.update(Object.merge(value, {i: [1, 1]}), {$set: {k: 4}}, false, true));
// Query the value with other fields (won't work, but no error)
-value = coll.findOne({ i : 1 });
-coll.find( Object.merge( value, { i : [ 1, 1 ] } ) ).toArray();
+value = coll.findOne({i: 1});
+coll.find(Object.merge(value, {i: [1, 1]})).toArray();
// Can't remove using multikey, but shouldn't error
-value = coll.findOne({ i : 1 });
-coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4 ] } ) );
+value = coll.findOne({i: 1});
+coll.remove(Object.extend(value, {i: [1, 2, 3, 4]}));
// Can't remove using multikey, but shouldn't error
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.remove( Object.extend( value, { i : [ 1, 2, 3, 4, 5 ] } ) ));
-assert.eq( coll.find().itcount(), 1 );
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: [1, 2, 3, 4, 5]})));
+assert.eq(coll.find().itcount(), 1);
-value = coll.findOne({ i : 1 });
-assert.writeOK(coll.remove( Object.extend( value, { i : 1 } ) ));
-assert.eq( coll.find().itcount(), 0 );
+value = coll.findOne({i: 1});
+assert.writeOK(coll.remove(Object.extend(value, {i: 1})));
+assert.eq(coll.find().itcount(), 0);
-coll.ensureIndex({ _id : 1, i : 1, j: 1 });
+coll.ensureIndex({_id: 1, i: 1, j: 1});
// Can insert document that will make index into a multi-key as long as it's not part of shard key.
coll.remove({});
-assert.writeOK(coll.insert({ i: 1, j: [1, 2] }));
-assert.eq( coll.find().itcount(), 1 );
+assert.writeOK(coll.insert({i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
// Same is true for updates.
coll.remove({});
-coll.insert({ _id: 1, i: 1 });
-assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }));
-assert.eq( coll.find().itcount(), 1 );
+coll.insert({_id: 1, i: 1});
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}));
+assert.eq(coll.find().itcount(), 1);
// Same for upserts.
coll.remove({});
-assert.writeOK(coll.update({ _id: 1, i: 1 }, { _id: 1, i:1, j: [1, 2] }, true));
-assert.eq( coll.find().itcount(), 1 );
+assert.writeOK(coll.update({_id: 1, i: 1}, {_id: 1, i: 1, j: [1, 2]}, true));
+assert.eq(coll.find().itcount(), 1);
-printjson( "Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey" );
+printjson("Sharding-then-inserting-multikey tested, now trying inserting-then-sharding-multikey");
// Insert a bunch of data then shard over key which is an array
-var coll = mongos.getCollection( "" + coll + "2" );
-for( var i = 0; i < 10; i++ ){
+var coll = mongos.getCollection("" + coll + "2");
+for (var i = 0; i < 10; i++) {
// TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({ i : [ i, i + 1 ] }));
+ assert.writeOK(coll.insert({i: [i, i + 1]}));
}
-coll.ensureIndex({ _id : 1, i : 1 });
+coll.ensureIndex({_id: 1, i: 1});
try {
- st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
-}
-catch( e ){
- print( "Correctly threw error on sharding with multikey index." );
+ st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
+} catch (e) {
+ print("Correctly threw error on sharding with multikey index.");
}
st.printShardingStatus();
// Insert a bunch of data then shard over key which is not an array
-var coll = mongos.getCollection( "" + coll + "3" );
-for( var i = 0; i < 10; i++ ){
+var coll = mongos.getCollection("" + coll + "3");
+for (var i = 0; i < 10; i++) {
// TODO : does not check weird cases like [ i, i ]
- assert.writeOK(coll.insert({ i : i }));
+ assert.writeOK(coll.insert({i: i}));
}
-coll.ensureIndex({ _id : 1, i : 1 });
+coll.ensureIndex({_id: 1, i: 1});
-st.shardColl( coll, { _id : 1, i : 1 }, { _id : ObjectId(), i : 1 } );
+st.shardColl(coll, {_id: 1, i: 1}, {_id: ObjectId(), i: 1});
st.printShardingStatus();
-
-
// Finish
st.stop();
diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js
index 8d45d4b2de3..7b8d55ee075 100644
--- a/jstests/sharding/auth.js
+++ b/jstests/sharding/auth.js
@@ -1,338 +1,366 @@
// Tests administrative sharding operations and map-reduce work or fail as expected, when key-based
// authentication is used
(function() {
-'use strict';
-
-var adminUser = {
- db : "admin",
- username : "foo",
- password : "bar"
-};
-
-var testUser = {
- db : "test",
- username : "bar",
- password : "baz"
-};
-
-var testUserReadOnly = {
- db : "test",
- username : "sad",
- password : "bat"
-};
-
-function login(userObj, thingToUse) {
- if (!thingToUse) {
- thingToUse = s;
- }
+ 'use strict';
+
+ var adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+ };
+
+ var testUser = {
+ db: "test",
+ username: "bar",
+ password: "baz"
+ };
+
+ var testUserReadOnly = {
+ db: "test",
+ username: "sad",
+ password: "bat"
+ };
+
+ function login(userObj, thingToUse) {
+ if (!thingToUse) {
+ thingToUse = s;
+ }
- thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
-}
-
-function logout(userObj, thingToUse) {
- if (!thingToUse)
- thingToUse = s;
-
- s.getDB(userObj.db).runCommand({logout:1});
-}
-
-function getShardName(rsTest) {
- var master = rsTest.getPrimary();
- var config = master.getDB("local").system.replset.findOne();
- var members = config.members.map(function(elem) { return elem.host; });
- return config._id+"/"+members.join(",");
-}
-
-var s = new ShardingTest({ name: "auth",
- mongos: 1,
- shards: 0,
- other: {
- extraOptions: { "keyFile": "jstests/libs/key1" },
- noChunkSize: true, }
- });
-
-if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
- print('Skipping test on 32-bit platforms');
- return;
-}
-
-print("Configuration: Add user " + tojson(adminUser));
-s.getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles});
-login(adminUser);
-
-// Set the chunk size, disable the secondary throttle (so the test doesn't run so slow)
-assert.writeOK(s.getDB( "config" ).settings.update({ _id: "chunksize" },
- { $set: { value : 1 } },
- { upsert: true }));
-assert.writeOK(s.getDB( "config" ).settings.update(
- { _id: "balancer" },
- { $set: { "_secondaryThrottle" : false,
- "_waitForDelete" : true } },
- { upsert: true }));
-
-printjson(s.getDB("config").settings.find().toArray());
-
-print("Restart mongos with different auth options");
-s.restartMongos(0, { v: 2,
- configdb: s._configDB,
- keyFile: "jstests/libs/key1",
- chunkSize: 1 });
-login(adminUser);
-
-var d1 = new ReplSetTest({ name : "d1", nodes : 3, useHostName : true });
-d1.startSet({keyFile : "jstests/libs/key2" });
-d1.initiate();
-
-print("d1 initiated");
-var shardName = authutil.asCluster(d1.nodes,
- "jstests/libs/key2",
- function() { return getShardName(d1); });
-
-print("adding shard w/out auth "+shardName);
-logout(adminUser);
-
-var result = s.getDB("admin").runCommand({addShard : shardName});
-printjson(result);
-assert.eq(result.code, 13);
-
-login(adminUser);
-
-print("adding shard w/wrong key "+shardName);
-
-var thrown = false;
-try {
- result = s.adminCommand({addShard : shardName});
-}
-catch(e) {
- thrown = true;
- printjson(e);
-}
-assert(thrown);
+ thingToUse.getDB(userObj.db).auth(userObj.username, userObj.password);
+ }
-print("start rs w/correct key");
+ function logout(userObj, thingToUse) {
+ if (!thingToUse)
+ thingToUse = s;
-d1.stopSet();
-d1.startSet({keyFile : "jstests/libs/key1" });
-d1.initiate();
+ s.getDB(userObj.db).runCommand({logout: 1});
+ }
-var master = d1.getPrimary();
+ function getShardName(rsTest) {
+ var master = rsTest.getPrimary();
+ var config = master.getDB("local").system.replset.findOne();
+ var members = config.members.map(function(elem) {
+ return elem.host;
+ });
+ return config._id + "/" + members.join(",");
+ }
-print("adding shard w/auth " + shardName);
+ var s = new ShardingTest({
+ name: "auth",
+ mongos: 1,
+ shards: 0,
+ other: {
+ extraOptions: {"keyFile": "jstests/libs/key1"},
+ noChunkSize: true,
+ }
+ });
-result = s.getDB("admin").runCommand({addShard : shardName});
-assert.eq(result.ok, 1, tojson(result));
+ if (s.getDB('admin').runCommand('buildInfo').bits < 64) {
+ print('Skipping test on 32-bit platforms');
+ return;
+ }
-s.getDB("admin").runCommand({enableSharding : "test"});
-s.getDB("admin").runCommand({shardCollection : "test.foo", key : {x : 1}});
+ print("Configuration: Add user " + tojson(adminUser));
+ s.getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
+ login(adminUser);
+
+    // Set the chunk size, disable the secondary throttle (so the test doesn't run so slowly)
+ assert.writeOK(
+ s.getDB("config").settings.update({_id: "chunksize"}, {$set: {value: 1}}, {upsert: true}));
+ assert.writeOK(s.getDB("config").settings.update(
+ {_id: "balancer"},
+ {$set: {"_secondaryThrottle": false, "_waitForDelete": true}},
+ {upsert: true}));
+
+ printjson(s.getDB("config").settings.find().toArray());
+
+ print("Restart mongos with different auth options");
+ s.restartMongos(0, {v: 2, configdb: s._configDB, keyFile: "jstests/libs/key1", chunkSize: 1});
+ login(adminUser);
+
+ var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true});
+ d1.startSet({keyFile: "jstests/libs/key2"});
+ d1.initiate();
+
+ print("d1 initiated");
+ var shardName = authutil.asCluster(d1.nodes,
+ "jstests/libs/key2",
+ function() {
+ return getShardName(d1);
+ });
+
+ print("adding shard w/out auth " + shardName);
+ logout(adminUser);
+
+ var result = s.getDB("admin").runCommand({addShard: shardName});
+ printjson(result);
+ assert.eq(result.code, 13);
+
+ login(adminUser);
+
+ print("adding shard w/wrong key " + shardName);
+
+ var thrown = false;
+ try {
+ result = s.adminCommand({addShard: shardName});
+ } catch (e) {
+ thrown = true;
+ printjson(e);
+ }
+ assert(thrown);
-d1.waitForState( d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
+ print("start rs w/correct key");
-s.getDB(testUser.db).createUser({user: testUser.username,
- pwd: testUser.password,
- roles: jsTest.basicUserRoles});
-s.getDB(testUserReadOnly.db).createUser({user: testUserReadOnly.username,
- pwd: testUserReadOnly.password,
- roles: jsTest.readOnlyUserRoles});
+ d1.stopSet();
+ d1.startSet({keyFile: "jstests/libs/key1"});
+ d1.initiate();
-logout(adminUser);
+ var master = d1.getPrimary();
-print("query try");
-var e = assert.throws(function() {
- s.s.getDB("foo").bar.findOne();
-});
-printjson(e);
+ print("adding shard w/auth " + shardName);
-print("cmd try");
-assert.eq(0, s.s.getDB("foo").runCommand({listDatabases:1}).ok);
+ result = s.getDB("admin").runCommand({addShard: shardName});
+ assert.eq(result.ok, 1, tojson(result));
-print("insert try 1");
-s.getDB("test").foo.insert({x:1});
+ s.getDB("admin").runCommand({enableSharding: "test"});
+ s.getDB("admin").runCommand({shardCollection: "test.foo", key: {x: 1}});
-login(testUser);
-assert.eq(s.getDB("test").foo.findOne(), null);
+ d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-print("insert try 2");
-assert.writeOK(s.getDB("test").foo.insert({ x: 1 }));
-assert.eq( 1 , s.getDB( "test" ).foo.find().itcount() , tojson(result) );
+ s.getDB(testUser.db)
+ .createUser(
+ {user: testUser.username, pwd: testUser.password, roles: jsTest.basicUserRoles});
+ s.getDB(testUserReadOnly.db)
+ .createUser({
+ user: testUserReadOnly.username,
+ pwd: testUserReadOnly.password,
+ roles: jsTest.readOnlyUserRoles
+ });
-logout(testUser);
+ logout(adminUser);
-var d2 = new ReplSetTest({name : "d2", nodes : 3, useHostName : true });
-d2.startSet({keyFile : "jstests/libs/key1" });
-d2.initiate();
-d2.awaitSecondaryNodes();
+ print("query try");
+ var e = assert.throws(function() {
+ s.s.getDB("foo").bar.findOne();
+ });
+ printjson(e);
-shardName = authutil.asCluster(d2.nodes, "jstests/libs/key1",
- function() { return getShardName(d2); });
+ print("cmd try");
+ assert.eq(0, s.s.getDB("foo").runCommand({listDatabases: 1}).ok);
-print("adding shard "+shardName);
-login(adminUser);
-print("logged in");
-result = s.getDB("admin").runCommand({addShard : shardName});
+ print("insert try 1");
+ s.getDB("test").foo.insert({x: 1});
-ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true });
-ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true });
+ login(testUser);
+ assert.eq(s.getDB("test").foo.findOne(), null);
-s.getDB("test").foo.remove({});
+ print("insert try 2");
+ assert.writeOK(s.getDB("test").foo.insert({x: 1}));
+ assert.eq(1, s.getDB("test").foo.find().itcount(), tojson(result));
-var num = 10000;
-var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
-for (i=0; i<num; i++) {
- bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
-}
-assert.writeOK(bulk.execute());
+ logout(testUser);
-s.startBalancer(60000);
+ var d2 = new ReplSetTest({name: "d2", nodes: 3, useHostName: true});
+ d2.startSet({keyFile: "jstests/libs/key1"});
+ d2.initiate();
+ d2.awaitSecondaryNodes();
-assert.soon(function() {
- var d1Chunks = s.getDB("config").chunks.count({shard : "d1"});
- var d2Chunks = s.getDB("config").chunks.count({shard : "d2"});
- var totalChunks = s.getDB("config").chunks.count({ns : "test.foo"});
+ shardName = authutil.asCluster(d2.nodes,
+ "jstests/libs/key1",
+ function() {
+ return getShardName(d2);
+ });
- print("chunks: " + d1Chunks+" "+d2Chunks+" "+totalChunks);
+ print("adding shard " + shardName);
+ login(adminUser);
+ print("logged in");
+ result = s.getDB("admin").runCommand({addShard: shardName});
- return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
- },
- "Chunks failed to balance",
- 60000,
- 5000);
+ ReplSetTest.awaitRSClientHosts(s.s, d1.nodes, {ok: true});
+ ReplSetTest.awaitRSClientHosts(s.s, d2.nodes, {ok: true});
-//SERVER-3645
-//assert.eq(s.getDB("test").foo.count(), num+1);
-var numDocs = s.getDB("test").foo.find().itcount();
-if (numDocs != num) {
- // Missing documents. At this point we're already in a failure mode, the code in this statement
- // is to get a better idea how/why it's failing.
+ s.getDB("test").foo.remove({});
- var numDocsSeen = 0;
- var lastDocNumber = -1;
- var missingDocNumbers = [];
- var docs = s.getDB("test").foo.find().sort({x:1}).toArray();
- for (var i = 0; i < docs.length; i++) {
- if (docs[i].x != lastDocNumber + 1) {
- for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
- missingDocNumbers.push(missing);
+ var num = 10000;
+ var bulk = s.getDB("test").foo.initializeUnorderedBulkOp();
+ for (i = 0; i < num; i++) {
+ bulk.insert(
+ {_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+ }
+ assert.writeOK(bulk.execute());
+
+ s.startBalancer(60000);
+
+ assert.soon(function() {
+ var d1Chunks = s.getDB("config").chunks.count({shard: "d1"});
+ var d2Chunks = s.getDB("config").chunks.count({shard: "d2"});
+ var totalChunks = s.getDB("config").chunks.count({ns: "test.foo"});
+
+ print("chunks: " + d1Chunks + " " + d2Chunks + " " + totalChunks);
+
+ return d1Chunks > 0 && d2Chunks > 0 && (d1Chunks + d2Chunks == totalChunks);
+ }, "Chunks failed to balance", 60000, 5000);
+
+ // SERVER-3645
+ // assert.eq(s.getDB("test").foo.count(), num+1);
+ var numDocs = s.getDB("test").foo.find().itcount();
+ if (numDocs != num) {
+        // Missing documents. At this point we're already in a failure mode; the code in this
+        // statement is to get a better idea of how/why it's failing.
+
+ var numDocsSeen = 0;
+ var lastDocNumber = -1;
+ var missingDocNumbers = [];
+ var docs = s.getDB("test").foo.find().sort({x: 1}).toArray();
+ for (var i = 0; i < docs.length; i++) {
+ if (docs[i].x != lastDocNumber + 1) {
+ for (var missing = lastDocNumber + 1; missing < docs[i].x; missing++) {
+ missingDocNumbers.push(missing);
+ }
}
+ lastDocNumber = docs[i].x;
+ numDocsSeen++;
}
- lastDocNumber = docs[i].x;
- numDocsSeen++;
- }
- assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
- assert.eq(num - numDocs, missingDocNumbers.length);
+ assert.eq(numDocs, numDocsSeen, "More docs discovered on second find()");
+ assert.eq(num - numDocs, missingDocNumbers.length);
- load('jstests/libs/trace_missing_docs.js');
+ load('jstests/libs/trace_missing_docs.js');
- for ( var i = 0; i < missingDocNumbers.length; i++ ) {
- jsTest.log( "Tracing doc: " + missingDocNumbers[i] );
- traceMissingDoc( s.getDB( "test" ).foo, { _id : missingDocNumbers[i],
- x : missingDocNumbers[i] } );
+ for (var i = 0; i < missingDocNumbers.length; i++) {
+ jsTest.log("Tracing doc: " + missingDocNumbers[i]);
+ traceMissingDoc(s.getDB("test").foo,
+ {_id: missingDocNumbers[i], x: missingDocNumbers[i]});
+ }
+
+ assert(false,
+ "Number of docs found does not equal the number inserted. Missing docs: " +
+ missingDocNumbers);
}
- assert(false, "Number of docs found does not equal the number inserted. Missing docs: " + missingDocNumbers);
-}
+ // We're only sure we aren't duplicating documents iff there's no balancing going on here
+ // This call also waits for any ongoing balancing to stop
+ s.stopBalancer(60000);
-// We're only sure we aren't duplicating documents iff there's no balancing going on here
-// This call also waits for any ongoing balancing to stop
-s.stopBalancer(60000);
+ var cursor = s.getDB("test").foo.find({x: {$lt: 500}});
-var cursor = s.getDB("test").foo.find({x:{$lt : 500}});
+ var count = 0;
+ while (cursor.hasNext()) {
+ cursor.next();
+ count++;
+ }
-var count = 0;
-while (cursor.hasNext()) {
- cursor.next();
- count++;
-}
+ assert.eq(count, 500);
+
+ logout(adminUser);
+
+ d1.waitForState(d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+ d2.waitForState(d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
+
+ authutil.asCluster(d1.nodes,
+ "jstests/libs/key1",
+ function() {
+ d1.awaitReplication(120000);
+ });
+ authutil.asCluster(d2.nodes,
+ "jstests/libs/key1",
+ function() {
+ d2.awaitReplication(120000);
+ });
+
+    // Add the admin user on the shards themselves; hack to prevent the localhost auth bypass
+ d1.getPrimary()
+ .getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+ d2.getPrimary()
+ .getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles},
+ {w: 3, wtimeout: 60000});
+
+ login(testUser);
+ print("testing map reduce");
+
+    // Sharded map reduce can be tricky since all components talk to each other. For example,
+    // SERVER-4114 is triggered when one mongod connects to another for the final reduce; it's
+    // not properly tested here since the addresses are localhost, which is more permissive.
+ var res = s.getDB("test").runCommand({
+ mapreduce: "foo",
+ map: function() {
+ emit(this.x, 1);
+ },
+ reduce: function(key, values) {
+ return values.length;
+ },
+ out: "mrout"
+ });
+ printjson(res);
+ assert.commandWorked(res);
+
+ // Check that dump doesn't get stuck with auth
+ var x = runMongoProgram("mongodump",
+ "--host",
+ s.s.host,
+ "-d",
+ testUser.db,
+ "-u",
+ testUser.username,
+ "-p",
+ testUser.password,
+ "--authenticationMechanism",
+ "SCRAM-SHA-1");
+ print("result: " + x);
+
+ // Test read only users
+ print("starting read only tests");
+
+ var readOnlyS = new Mongo(s.getDB("test").getMongo().host);
+ var readOnlyDB = readOnlyS.getDB("test");
+
+ print(" testing find that should fail");
+ assert.throws(function() {
+ readOnlyDB.foo.findOne();
+ });
-assert.eq(count, 500);
+ print(" logging in");
+ login(testUserReadOnly, readOnlyS);
-logout(adminUser);
+ print(" testing find that should work");
+ readOnlyDB.foo.findOne();
-d1.waitForState( d1.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
-d2.waitForState( d2.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
+ print(" testing write that should fail");
+ assert.writeError(readOnlyDB.foo.insert({eliot: 1}));
-authutil.asCluster(d1.nodes, "jstests/libs/key1", function() { d1.awaitReplication(120000); });
-authutil.asCluster(d2.nodes, "jstests/libs/key1", function() { d2.awaitReplication(120000); });
+ print(" testing read command (should succeed)");
+ assert.commandWorked(readOnlyDB.runCommand({count: "foo"}));
-// add admin on shard itself, hack to prevent localhost auth bypass
-d1.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
-d2.getPrimary().getDB(adminUser.db).createUser({user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles},
- {w: 3, wtimeout: 60000});
+ print("make sure currentOp/killOp fail");
+ assert.commandFailed(readOnlyDB.currentOp());
+ assert.commandFailed(readOnlyDB.killOp(123));
-login(testUser);
-print( "testing map reduce" );
+    // fsyncUnlock doesn't work in mongos anyway, so no need to check authorization for it
+ /*
+ broken because of SERVER-4156
+ print( " testing write command (should fail)" );
+ assert.commandFailed(readOnlyDB.runCommand(
+ {mapreduce : "foo",
+ map : function() { emit(this.y, 1); },
+ reduce : function(key, values) { return values.length; },
+ out:"blarg"
+ }));
+ */
-// Sharded map reduce can be tricky since all components talk to each other. For example
-// SERVER-4114 is triggered when 1 mongod connects to another for final reduce it's not
-// properly tested here since addresses are localhost, which is more permissive.
-var res = s.getDB("test").runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.x, 1); },
- reduce : function(key, values) { return values.length; },
- out:"mrout"
- });
-printjson(res);
-assert.commandWorked(res);
-
-// Check that dump doesn't get stuck with auth
-var x = runMongoProgram("mongodump",
- "--host", s.s.host,
- "-d", testUser.db,
- "-u", testUser.username,
- "-p", testUser.password,
- "--authenticationMechanism", "SCRAM-SHA-1");
-print("result: " + x);
-
-// Test read only users
-print( "starting read only tests" );
-
-var readOnlyS = new Mongo( s.getDB( "test" ).getMongo().host );
-var readOnlyDB = readOnlyS.getDB( "test" );
-
-print( " testing find that should fail" );
-assert.throws( function(){ readOnlyDB.foo.findOne(); } );
-
-print( " logging in" );
-login( testUserReadOnly , readOnlyS );
-
-print( " testing find that should work" );
-readOnlyDB.foo.findOne();
-
-print( " testing write that should fail" );
-assert.writeError(readOnlyDB.foo.insert({ eliot: 1 }));
-
-print( " testing read command (should succeed)" );
-assert.commandWorked(readOnlyDB.runCommand({count : "foo"}));
-
-print("make sure currentOp/killOp fail");
-assert.commandFailed(readOnlyDB.currentOp());
-assert.commandFailed(readOnlyDB.killOp(123));
-
-// fsyncUnlock doesn't work in mongos anyway, so no need check authorization for it
-/*
-broken because of SERVER-4156
-print( " testing write command (should fail)" );
-assert.commandFailed(readOnlyDB.runCommand(
- {mapreduce : "foo",
- map : function() { emit(this.y, 1); },
- reduce : function(key, values) { return values.length; },
- out:"blarg"
- }));
-*/
-
-print( " testing logout (should succeed)" );
-assert.commandWorked(readOnlyDB.runCommand({logout : 1}));
-
-print("make sure currentOp/killOp fail again");
-assert.commandFailed(readOnlyDB.currentOp());
-assert.commandFailed(readOnlyDB.killOp(123));
-
-s.stop();
+ print(" testing logout (should succeed)");
+ assert.commandWorked(readOnlyDB.runCommand({logout: 1}));
+
+ print("make sure currentOp/killOp fail again");
+ assert.commandFailed(readOnlyDB.currentOp());
+ assert.commandFailed(readOnlyDB.killOp(123));
+
+ s.stop();
})();
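
A minimal illustrative sketch of the pattern auth.js exercises above: derive a keyfile-protected
replica set's shard name while authenticated against the set (via authutil.asCluster), then run
addShard as the authenticated cluster admin. This is a condensed, hypothetical snippet for reading
purposes only; names such as "d1" and the key paths simply mirror the test.

// Hypothetical sketch: condenses the auth.js flow above; relies on the mongo shell test helpers.
var s = new ShardingTest(
    {name: "authSketch", mongos: 1, shards: 0, other: {extraOptions: {keyFile: "jstests/libs/key1"}}});

// The cluster admin must exist and be authenticated before privileged commands are run.
s.getDB("admin").createUser({user: "foo", pwd: "bar", roles: jsTest.adminUserRoles});
s.getDB("admin").auth("foo", "bar");

// Start a keyfile-protected replica set and derive its shard name
// ("<setName>/<host1>,<host2>,...") while authenticated against the set itself.
var d1 = new ReplSetTest({name: "d1", nodes: 3, useHostName: true});
d1.startSet({keyFile: "jstests/libs/key1"});
d1.initiate();
var shardName = authutil.asCluster(d1.nodes, "jstests/libs/key1", function() {
    var config = d1.getPrimary().getDB("local").system.replset.findOne();
    return config._id + "/" + config.members.map(function(m) {
        return m.host;
    }).join(",");
});

// addShard succeeds only for an authenticated admin and only when the keyfiles match.
assert.commandWorked(s.getDB("admin").runCommand({addShard: shardName}));
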
diff --git a/jstests/sharding/auth2.js b/jstests/sharding/auth2.js
index e58657e8dba..e26c58dccf1 100644
--- a/jstests/sharding/auth2.js
+++ b/jstests/sharding/auth2.js
@@ -1,6 +1,10 @@
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
- other : { nopreallocj : 1, verbose : 2, useHostname : true,
- configOptions : { verbose : 2 }}});
+var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 2,
+ chunkSize: 1,
+ verbose: 2,
+ other: {nopreallocj: 1, verbose: 2, useHostname: true, configOptions: {verbose: 2}}
+});
var mongos = st.s;
var adminDB = mongos.getDB('admin');
@@ -8,13 +12,12 @@ var db = mongos.getDB('test');
adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles});
-jsTestLog( "Add user was successful" );
-
+jsTestLog("Add user was successful");
// Test for SERVER-6549, make sure that repeatedly logging in always passes.
-for ( var i = 0; i < 100; i++ ) {
- adminDB = new Mongo( mongos.host ).getDB('admin');
- assert( adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i );
+for (var i = 0; i < 100; i++) {
+ adminDB = new Mongo(mongos.host).getDB('admin');
+ assert(adminDB.auth('admin', 'password'), "Auth failed on attempt #: " + i);
}
st.stop();
diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js
index 4de15e2f58a..cb1887d4aae 100644
--- a/jstests/sharding/authCommands.js
+++ b/jstests/sharding/authCommands.js
@@ -3,292 +3,310 @@
*/
var doTest = function() {
-var rsOpts = { oplogSize: 10, useHostname : false };
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunksize : 2,
- rs : rsOpts, other : { nopreallocj : 1, useHostname : false }});
-
-var mongos = st.s;
-var adminDB = mongos.getDB( 'admin' );
-var configDB = mongos.getDB( 'config' );
-var testDB = mongos.getDB( 'test' );
-
-jsTestLog('Setting up initial users');
-var rwUser = 'rwUser';
-var roUser = 'roUser';
-var password = 'password';
-var expectedDocs = 1000;
-
-adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
-
-assert( adminDB.auth( rwUser, password ) );
-
-// Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
-// wait for the mongos to explicitly detect them.
-ReplSetTest.awaitRSClientHosts( mongos, st.rs0.getSecondaries(), { ok : true, secondary : true });
-ReplSetTest.awaitRSClientHosts( mongos, st.rs1.getSecondaries(), { ok : true, secondary : true });
-
-testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
-testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
-
-authenticatedConn = new Mongo( mongos.host );
-authenticatedConn.getDB( 'admin' ).auth( rwUser, password );
-
-// Add user to shards to prevent localhost connections from having automatic full access
-st.rs0.getPrimary().getDB( 'admin' ).createUser({user: 'user',
- pwd: 'password',
- roles: jsTest.basicUserRoles},
- {w: 3, wtimeout: 30000});
-st.rs1.getPrimary().getDB( 'admin' ).createUser({user: 'user',
- pwd: 'password',
- roles: jsTest.basicUserRoles},
- {w: 3, wtimeout: 30000} );
-
-
-
-jsTestLog('Creating initial data');
-
-st.adminCommand( { enablesharding : "test" } );
-st.ensurePrimaryShard('test', 'test-rs0');
-st.adminCommand( { shardcollection : "test.foo" , key : { i : 1, j : 1 } } );
-
-// Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
-
-var str = 'a';
-while ( str.length < 8000 ) {
- str += str;
-}
-
-var bulk = testDB.foo.initializeUnorderedBulkOp();
-for ( var i = 0; i < 100; i++ ) {
- for ( var j = 0; j < 10; j++ ) {
- bulk.insert({i:i, j:j, str:str});
+ var rsOpts = {
+ oplogSize: 10,
+ useHostname: false
+ };
+ var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 2,
+ chunksize: 2,
+ rs: rsOpts,
+ other: {nopreallocj: 1, useHostname: false}
+ });
+
+ var mongos = st.s;
+ var adminDB = mongos.getDB('admin');
+ var configDB = mongos.getDB('config');
+ var testDB = mongos.getDB('test');
+
+ jsTestLog('Setting up initial users');
+ var rwUser = 'rwUser';
+ var roUser = 'roUser';
+ var password = 'password';
+ var expectedDocs = 1000;
+
+ adminDB.createUser({user: rwUser, pwd: password, roles: jsTest.adminUserRoles});
+
+ assert(adminDB.auth(rwUser, password));
+
+ // Secondaries should be up here, since we awaitReplication in the ShardingTest, but we *don't*
+ // wait for the mongos to explicitly detect them.
+ ReplSetTest.awaitRSClientHosts(mongos, st.rs0.getSecondaries(), {ok: true, secondary: true});
+ ReplSetTest.awaitRSClientHosts(mongos, st.rs1.getSecondaries(), {ok: true, secondary: true});
+
+ testDB.createUser({user: rwUser, pwd: password, roles: jsTest.basicUserRoles});
+ testDB.createUser({user: roUser, pwd: password, roles: jsTest.readOnlyUserRoles});
+
+ authenticatedConn = new Mongo(mongos.host);
+ authenticatedConn.getDB('admin').auth(rwUser, password);
+
+ // Add user to shards to prevent localhost connections from having automatic full access
+ st.rs0.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+ st.rs1.getPrimary().getDB('admin').createUser(
+ {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000});
+
+ jsTestLog('Creating initial data');
+
+ st.adminCommand({enablesharding: "test"});
+ st.ensurePrimaryShard('test', 'test-rs0');
+ st.adminCommand({shardcollection: "test.foo", key: {i: 1, j: 1}});
+
+ // Balancer is stopped by default, so no moveChunks will interfere with the splits we're testing
+
+ var str = 'a';
+ while (str.length < 8000) {
+ str += str;
}
-}
-assert.writeOK(bulk.execute({ w: "majority"}));
-
-assert.eq(expectedDocs, testDB.foo.count());
-
-// Wait for the balancer to start back up
-assert.writeOK(configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
-st.startBalancer();
-
-// Make sure we've done at least some splitting, so the balancer will work
-assert.gt( configDB.chunks.find({ ns : 'test.foo' }).count(), 2 );
-
-// Make sure we eventually balance all the chunks we've created
-assert.soon( function() {
- var x = st.chunkDiff( "foo", "test" );
- print( "chunk diff: " + x );
- return x < 2 && configDB.locks.findOne({ _id : 'test.foo' }).state == 0;
-}, "no balance happened", 5 * 60 * 1000 );
-
-var map = function() { emit (this.i, this.j); };
-var reduce = function( key, values ) {
- var jCount = 0;
- values.forEach( function(j) { jCount += j; } );
- return jCount;
-};
-
-var checkCommandSucceeded = function( db, cmdObj ) {
- print( "Running command that should succeed: " );
- printjson( cmdObj );
- resultObj = db.runCommand( cmdObj );
- printjson( resultObj );
- assert ( resultObj.ok );
- return resultObj;
-};
-
-var checkCommandFailed = function( db, cmdObj ) {
- print( "Running command that should fail: " );
- printjson( cmdObj );
- resultObj = db.runCommand( cmdObj );
- printjson( resultObj );
- assert ( !resultObj.ok );
- return resultObj;
-};
-var checkReadOps = function( hasReadAuth ) {
- if ( hasReadAuth ) {
- print( "Checking read operations, should work" );
- assert.eq( expectedDocs, testDB.foo.find().itcount() );
- assert.eq( expectedDocs, testDB.foo.count() );
- // NOTE: This is an explicit check that GLE can be run with read prefs, not the result of
- // above.
- assert.eq( null, testDB.runCommand({getlasterror : 1}).err );
- checkCommandSucceeded( testDB, {dbstats : 1} );
- checkCommandSucceeded( testDB, {collstats : 'foo'} );
-
- // inline map-reduce works read-only
- var res = checkCommandSucceeded( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : {inline : 1}});
- assert.eq( 100, res.results.length );
- assert.eq( 45, res.results[0].value );
-
- res = checkCommandSucceeded( testDB,
- {aggregate:'foo',
- pipeline: [ {$project : {j : 1}},
- {$group : {_id : 'j', sum : {$sum : '$j'}}}]} );
- assert.eq( 4500, res.result[0].sum );
- } else {
- print( "Checking read operations, should fail" );
- assert.throws( function() { testDB.foo.find().itcount(); } );
- checkCommandFailed( testDB, {dbstats : 1} );
- checkCommandFailed( testDB, {collstats : 'foo'} );
- checkCommandFailed( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : { inline : 1 }} );
- checkCommandFailed( testDB, {aggregate:'foo',
- pipeline: [ {$project : {j : 1}},
- {$group : {_id : 'j', sum : {$sum : '$j'}}}]} );
+ var bulk = testDB.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ for (var j = 0; j < 10; j++) {
+ bulk.insert({i: i, j: j, str: str});
+ }
}
-};
-
-var checkWriteOps = function( hasWriteAuth ) {
- if ( hasWriteAuth ) {
- print( "Checking write operations, should work" );
- testDB.foo.insert({a : 1, i : 1, j : 1});
- res = checkCommandSucceeded( testDB, { findAndModify: "foo", query: {a:1, i:1, j:1},
- update: {$set: {b:1}}});
- assert.eq(1, res.value.a);
- assert.eq(null, res.value.b);
- assert.eq(1, testDB.foo.findOne({a:1}).b);
- testDB.foo.remove({a : 1});
- assert.eq( null, testDB.runCommand({getlasterror : 1}).err );
- checkCommandSucceeded( testDB, {reIndex:'foo'} );
- checkCommandSucceeded( testDB, {repairDatabase : 1} );
- checkCommandSucceeded( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : 'mrOutput'} );
- assert.eq( 100, testDB.mrOutput.count() );
- assert.eq( 45, testDB.mrOutput.findOne().value );
-
- checkCommandSucceeded( testDB, {drop : 'foo'} );
- assert.eq( 0, testDB.foo.count() );
- testDB.foo.insert({a:1});
- assert.eq( 1, testDB.foo.count() );
- checkCommandSucceeded( testDB, {dropDatabase : 1} );
- assert.eq( 0, testDB.foo.count() );
- checkCommandSucceeded( testDB, {create : 'baz'} );
- } else {
- print( "Checking write operations, should fail" );
- testDB.foo.insert({a : 1, i : 1, j : 1});
- assert.eq(0, authenticatedConn.getDB('test').foo.count({a : 1, i : 1, j : 1}));
- checkCommandFailed( testDB, { findAndModify: "foo", query: {a:1, i:1, j:1},
- update: {$set: {b:1}}} );
- checkCommandFailed( testDB, {reIndex:'foo'} );
- checkCommandFailed( testDB, {repairDatabase : 1} );
- checkCommandFailed( testDB, {mapreduce : 'foo', map : map, reduce : reduce,
- out : 'mrOutput'} );
- checkCommandFailed( testDB, {drop : 'foo'} );
- checkCommandFailed( testDB, {dropDatabase : 1} );
- passed = true;
- try {
- // For some reason when create fails it throws an exception instead of just returning ok:0
- res = testDB.runCommand( {create : 'baz'} );
- if ( !res.ok ) {
+ assert.writeOK(bulk.execute({w: "majority"}));
+
+ assert.eq(expectedDocs, testDB.foo.count());
+
+ // Wait for the balancer to start back up
+ assert.writeOK(
+ configDB.settings.update({_id: 'balancer'}, {$set: {_waitForDelete: true}}, true));
+ st.startBalancer();
+
+ // Make sure we've done at least some splitting, so the balancer will work
+ assert.gt(configDB.chunks.find({ns: 'test.foo'}).count(), 2);
+
+ // Make sure we eventually balance all the chunks we've created
+ assert.soon(function() {
+ var x = st.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2 && configDB.locks.findOne({_id: 'test.foo'}).state == 0;
+ }, "no balance happened", 5 * 60 * 1000);
+
+ var map = function() {
+ emit(this.i, this.j);
+ };
+ var reduce = function(key, values) {
+ var jCount = 0;
+ values.forEach(function(j) {
+ jCount += j;
+ });
+ return jCount;
+ };
+
+ var checkCommandSucceeded = function(db, cmdObj) {
+ print("Running command that should succeed: ");
+ printjson(cmdObj);
+ resultObj = db.runCommand(cmdObj);
+ printjson(resultObj);
+ assert(resultObj.ok);
+ return resultObj;
+ };
+
+ var checkCommandFailed = function(db, cmdObj) {
+ print("Running command that should fail: ");
+ printjson(cmdObj);
+ resultObj = db.runCommand(cmdObj);
+ printjson(resultObj);
+ assert(!resultObj.ok);
+ return resultObj;
+ };
+
+ var checkReadOps = function(hasReadAuth) {
+ if (hasReadAuth) {
+ print("Checking read operations, should work");
+ assert.eq(expectedDocs, testDB.foo.find().itcount());
+ assert.eq(expectedDocs, testDB.foo.count());
+            // NOTE: This is an explicit check that GLE can be run with read prefs, not the
+            // result of the above.
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB, {dbstats: 1});
+ checkCommandSucceeded(testDB, {collstats: 'foo'});
+
+ // inline map-reduce works read-only
+ var res = checkCommandSucceeded(
+ testDB, {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ assert.eq(100, res.results.length);
+ assert.eq(45, res.results[0].value);
+
+ res = checkCommandSucceeded(
+ testDB,
+ {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
+ assert.eq(4500, res.result[0].sum);
+ } else {
+ print("Checking read operations, should fail");
+ assert.throws(function() {
+ testDB.foo.find().itcount();
+ });
+ checkCommandFailed(testDB, {dbstats: 1});
+ checkCommandFailed(testDB, {collstats: 'foo'});
+ checkCommandFailed(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: {inline: 1}});
+ checkCommandFailed(
+ testDB,
+ {
+ aggregate: 'foo',
+ pipeline: [{$project: {j: 1}}, {$group: {_id: 'j', sum: {$sum: '$j'}}}]
+ });
+ }
+ };
+
+ var checkWriteOps = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ print("Checking write operations, should work");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ res = checkCommandSucceeded(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ assert.eq(1, res.value.a);
+ assert.eq(null, res.value.b);
+ assert.eq(1, testDB.foo.findOne({a: 1}).b);
+ testDB.foo.remove({a: 1});
+ assert.eq(null, testDB.runCommand({getlasterror: 1}).err);
+ checkCommandSucceeded(testDB, {reIndex: 'foo'});
+ checkCommandSucceeded(testDB, {repairDatabase: 1});
+ checkCommandSucceeded(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ assert.eq(100, testDB.mrOutput.count());
+ assert.eq(45, testDB.mrOutput.findOne().value);
+
+ checkCommandSucceeded(testDB, {drop: 'foo'});
+ assert.eq(0, testDB.foo.count());
+ testDB.foo.insert({a: 1});
+ assert.eq(1, testDB.foo.count());
+ checkCommandSucceeded(testDB, {dropDatabase: 1});
+ assert.eq(0, testDB.foo.count());
+ checkCommandSucceeded(testDB, {create: 'baz'});
+ } else {
+ print("Checking write operations, should fail");
+ testDB.foo.insert({a: 1, i: 1, j: 1});
+ assert.eq(0, authenticatedConn.getDB('test').foo.count({a: 1, i: 1, j: 1}));
+ checkCommandFailed(
+ testDB, {findAndModify: "foo", query: {a: 1, i: 1, j: 1}, update: {$set: {b: 1}}});
+ checkCommandFailed(testDB, {reIndex: 'foo'});
+ checkCommandFailed(testDB, {repairDatabase: 1});
+ checkCommandFailed(testDB,
+ {mapreduce: 'foo', map: map, reduce: reduce, out: 'mrOutput'});
+ checkCommandFailed(testDB, {drop: 'foo'});
+ checkCommandFailed(testDB, {dropDatabase: 1});
+ passed = true;
+ try {
+ // For some reason when create fails it throws an exception instead of just
+ // returning ok:0
+ res = testDB.runCommand({create: 'baz'});
+ if (!res.ok) {
+ passed = false;
+ }
+ } catch (e) {
+ // expected
+ printjson(e);
passed = false;
}
- } catch (e) {
- // expected
- printjson(e);
- passed = false;
+ assert(!passed);
}
- assert( !passed );
- }
-};
-
-var checkAdminOps = function( hasAuth ) {
- if ( hasAuth ) {
- checkCommandSucceeded( adminDB, {getCmdLineOpts : 1} );
- checkCommandSucceeded( adminDB, {serverStatus : 1} );
- checkCommandSucceeded( adminDB, {listShards : 1} );
- checkCommandSucceeded( adminDB, {whatsmyuri : 1} );
- checkCommandSucceeded( adminDB, {isdbgrid : 1} );
- checkCommandSucceeded( adminDB, {ismaster : 1} );
- checkCommandSucceeded( adminDB, {split : 'test.foo', find : {i : 1, j : 1}} );
- chunk = configDB.chunks.findOne({ shard : st.rs0.name });
- checkCommandSucceeded( adminDB, {moveChunk : 'test.foo', find : chunk.min,
- to : st.rs1.name, _waitForDelete : true} );
- } else {
- checkCommandFailed( adminDB, {getCmdLineOpts : 1} );
- checkCommandFailed( adminDB, {serverStatus : 1} );
- checkCommandFailed( adminDB, {listShards : 1} );
- // whatsmyuri, isdbgrid, and ismaster don't require any auth
- checkCommandSucceeded( adminDB, {whatsmyuri : 1} );
- checkCommandSucceeded( adminDB, {isdbgrid : 1} );
- checkCommandSucceeded( adminDB, {ismaster : 1} );
- checkCommandFailed( adminDB, {split : 'test.foo', find : {i : 1, j : 1}} );
- chunkKey = { i : { $minKey : 1 }, j : { $minKey : 1 } };
- checkCommandFailed( adminDB, {moveChunk : 'test.foo', find : chunkKey,
- to : st.rs1.name, _waitForDelete : true} );
-
- }
-};
-
-var checkRemoveShard = function( hasWriteAuth ) {
- if ( hasWriteAuth ) {
- // start draining
- checkCommandSucceeded( adminDB, { removeshard : st.rs1.name } );
- // Wait for shard to be completely removed
- checkRemoveShard = function() {
- res = checkCommandSucceeded( adminDB, { removeshard : st.rs1.name } );
- return res.msg == 'removeshard completed successfully';
- };
- assert.soon( checkRemoveShard , "failed to remove shard" );
- } else {
- checkCommandFailed( adminDB, { removeshard : st.rs1.name } );
- }
-};
-
-var checkAddShard = function( hasWriteAuth ) {
- if ( hasWriteAuth ) {
- checkCommandSucceeded( adminDB, { addshard : st.rs1.getURL() } );
- } else {
- checkCommandFailed( adminDB, { addshard : st.rs1.getURL() } );
- }
-};
-
-
-st.stopBalancer();
-
-jsTestLog("Checking admin commands with admin auth credentials");
-checkAdminOps( true );
-assert( adminDB.logout().ok );
-
-jsTestLog("Checking admin commands with no auth credentials");
-checkAdminOps( false );
-
-jsTestLog("Checking commands with no auth credentials");
-checkReadOps( false );
-checkWriteOps( false );
-
-// Authenticate as read-only user
-jsTestLog("Checking commands with read-only auth credentials");
-assert( testDB.auth( roUser, password ) );
-checkReadOps( true );
-checkWriteOps( false );
-
-// Authenticate as read-write user
-jsTestLog("Checking commands with read-write auth credentials");
-assert( testDB.auth( rwUser, password ) );
-checkReadOps( true );
-checkWriteOps( true );
-
-
-jsTestLog("Check drainging/removing a shard");
-assert( testDB.logout().ok );
-checkRemoveShard( false );
-assert( adminDB.auth( rwUser, password ) );
-assert( testDB.dropDatabase().ok );
-checkRemoveShard( true );
-st.printShardingStatus();
-
-jsTestLog("Check adding a shard");
-assert( adminDB.logout().ok );
-checkAddShard( false );
-assert( adminDB.auth( rwUser, password ) );
-checkAddShard( true );
-st.printShardingStatus();
+ };
+
+ var checkAdminOps = function(hasAuth) {
+ if (hasAuth) {
+ checkCommandSucceeded(adminDB, {getCmdLineOpts: 1});
+ checkCommandSucceeded(adminDB, {serverStatus: 1});
+ checkCommandSucceeded(adminDB, {listShards: 1});
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandSucceeded(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ chunk = configDB.chunks.findOne({shard: st.rs0.name});
+ checkCommandSucceeded(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunk.min, to: st.rs1.name, _waitForDelete: true});
+ } else {
+ checkCommandFailed(adminDB, {getCmdLineOpts: 1});
+ checkCommandFailed(adminDB, {serverStatus: 1});
+ checkCommandFailed(adminDB, {listShards: 1});
+ // whatsmyuri, isdbgrid, and ismaster don't require any auth
+ checkCommandSucceeded(adminDB, {whatsmyuri: 1});
+ checkCommandSucceeded(adminDB, {isdbgrid: 1});
+ checkCommandSucceeded(adminDB, {ismaster: 1});
+ checkCommandFailed(adminDB, {split: 'test.foo', find: {i: 1, j: 1}});
+ chunkKey = {
+ i: {$minKey: 1},
+ j: {$minKey: 1}
+ };
+ checkCommandFailed(
+ adminDB,
+ {moveChunk: 'test.foo', find: chunkKey, to: st.rs1.name, _waitForDelete: true});
+ }
+ };
+
+ var checkRemoveShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ // start draining
+ checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ // Wait for shard to be completely removed
+ checkRemoveShard = function() {
+ res = checkCommandSucceeded(adminDB, {removeshard: st.rs1.name});
+ return res.msg == 'removeshard completed successfully';
+ };
+ assert.soon(checkRemoveShard, "failed to remove shard");
+ } else {
+ checkCommandFailed(adminDB, {removeshard: st.rs1.name});
+ }
+ };
-st.stop();
+ var checkAddShard = function(hasWriteAuth) {
+ if (hasWriteAuth) {
+ checkCommandSucceeded(adminDB, {addshard: st.rs1.getURL()});
+ } else {
+ checkCommandFailed(adminDB, {addshard: st.rs1.getURL()});
+ }
+ };
+
+ st.stopBalancer();
+
+ jsTestLog("Checking admin commands with admin auth credentials");
+ checkAdminOps(true);
+ assert(adminDB.logout().ok);
+
+ jsTestLog("Checking admin commands with no auth credentials");
+ checkAdminOps(false);
+
+ jsTestLog("Checking commands with no auth credentials");
+ checkReadOps(false);
+ checkWriteOps(false);
+
+ // Authenticate as read-only user
+ jsTestLog("Checking commands with read-only auth credentials");
+ assert(testDB.auth(roUser, password));
+ checkReadOps(true);
+ checkWriteOps(false);
+
+ // Authenticate as read-write user
+ jsTestLog("Checking commands with read-write auth credentials");
+ assert(testDB.auth(rwUser, password));
+ checkReadOps(true);
+ checkWriteOps(true);
+
+    jsTestLog("Check draining/removing a shard");
+ assert(testDB.logout().ok);
+ checkRemoveShard(false);
+ assert(adminDB.auth(rwUser, password));
+ assert(testDB.dropDatabase().ok);
+ checkRemoveShard(true);
+ st.printShardingStatus();
+
+ jsTestLog("Check adding a shard");
+ assert(adminDB.logout().ok);
+ checkAddShard(false);
+ assert(adminDB.auth(rwUser, password));
+ checkAddShard(true);
+ st.printShardingStatus();
+
+ st.stop();
};
doTest();
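
The credential-switching idiom at the end of authCommands.js above is worth isolating: the same
mongos connection is re-authenticated as different users and the allowed/forbidden operations are
asserted after each switch. A hedged, hypothetical condensation (it assumes the `st` ShardingTest
plus the users, password, and test.foo data created by the test):

// Hypothetical sketch: assumes the `st` ShardingTest and the users/data set up by authCommands.js.
var testDB = st.s.getDB('test');

// Read-only credentials: reads succeed, writes and destructive commands do not.
assert(testDB.auth('roUser', 'password'));
assert.eq(1000, testDB.foo.find().itcount());
assert(!testDB.runCommand({drop: 'foo'}).ok);
assert(testDB.logout().ok);

// Read-write credentials: the same kinds of commands now succeed.
assert(testDB.auth('rwUser', 'password'));
assert.commandWorked(testDB.runCommand({create: 'baz'}));
assert(testDB.logout().ok);
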
diff --git a/jstests/sharding/authConnectionHook.js b/jstests/sharding/authConnectionHook.js
index 4356180107d..516b0d34554 100644
--- a/jstests/sharding/authConnectionHook.js
+++ b/jstests/sharding/authConnectionHook.js
@@ -1,7 +1,12 @@
-// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks the cluster.
-var st = new ShardingTest({ keyFile : 'jstests/libs/key1', shards : 2, chunkSize: 1, verbose : 2,
- other : { nopreallocj : 1, verbose : 2, useHostname : true,
- configOptions : { verbose : 2 }}});
+// Test for SERVER-8786 - if the first operation on an authenticated shard is moveChunk, it breaks
+// the cluster.
+var st = new ShardingTest({
+ keyFile: 'jstests/libs/key1',
+ shards: 2,
+ chunkSize: 1,
+ verbose: 2,
+ other: {nopreallocj: 1, verbose: 2, useHostname: true, configOptions: {verbose: 2}}
+});
var mongos = st.s;
var adminDB = mongos.getDB('admin');
@@ -11,18 +16,18 @@ adminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles
adminDB.auth('admin', 'password');
-adminDB.runCommand({enableSharding : "test"});
+adminDB.runCommand({enableSharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
-adminDB.runCommand({shardCollection : "test.foo", key : {x : 1}});
+adminDB.runCommand({shardCollection: "test.foo", key: {x: 1}});
for (var i = 0; i < 100; i++) {
- db.foo.insert({x:i});
+ db.foo.insert({x: i});
}
-adminDB.runCommand({split: "test.foo", middle: {x:50}});
-var curShard = st.getShard("test.foo", {x:75});
+adminDB.runCommand({split: "test.foo", middle: {x: 50}});
+var curShard = st.getShard("test.foo", {x: 75});
var otherShard = st.getOther(curShard).name;
-adminDB.runCommand({moveChunk: "test.foo", find: {x:25}, to: otherShard, _waitForDelete:true});
+adminDB.runCommand({moveChunk: "test.foo", find: {x: 25}, to: otherShard, _waitForDelete: true});
st.printShardingStatus();
@@ -30,16 +35,13 @@ MongoRunner.stopMongod(st.shard0);
st.shard0 = MongoRunner.runMongod({restart: st.shard0});
// May fail the first couple times due to socket exceptions
-assert.soon( function() {
- var res = adminDB.runCommand({moveChunk: "test.foo",
- find: {x:75},
- to: otherShard});
- printjson(res);
- return res.ok;
- });
-
-
-printjson(db.foo.findOne({x:25}));
-printjson(db.foo.findOne({x:75}));
+assert.soon(function() {
+ var res = adminDB.runCommand({moveChunk: "test.foo", find: {x: 75}, to: otherShard});
+ printjson(res);
+ return res.ok;
+});
+
+printjson(db.foo.findOne({x: 25}));
+printjson(db.foo.findOne({x: 75}));
st.stop();
diff --git a/jstests/sharding/auth_add_shard.js b/jstests/sharding/auth_add_shard.js
index 8435c768c4f..4f0fec6de83 100644
--- a/jstests/sharding/auth_add_shard.js
+++ b/jstests/sharding/auth_add_shard.js
@@ -2,98 +2,102 @@
// The purpose of this test is to test authentication when adding/removing a shard. The test sets
// up a sharded system, then adds/removes a shard.
(function() {
-'use strict';
+ 'use strict';
-// login method to login into the database
-function login(userObj) {
- var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
- printjson(authResult);
-}
+ // login method to login into the database
+ function login(userObj) {
+ var authResult = mongos.getDB(userObj.db).auth(userObj.username, userObj.password);
+ printjson(authResult);
+ }
-// admin user object
-var adminUser = { db: "admin", username: "foo", password: "bar" };
+ // admin user object
+ var adminUser = {
+ db: "admin",
+ username: "foo",
+ password: "bar"
+ };
-//set up a 2 shard cluster with keyfile
-var st = new ShardingTest({ name: "auth_add_shard1", shards: 1,
- mongos: 1, keyFile: "jstests/libs/key1" });
+ // set up a 2 shard cluster with keyfile
+ var st = new ShardingTest(
+ {name: "auth_add_shard1", shards: 1, mongos: 1, keyFile: "jstests/libs/key1"});
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
-print("1 shard system setup");
+ print("1 shard system setup");
-//add the admin user
-print("adding user");
-mongos.getDB(adminUser.db).createUser({ user: adminUser.username,
- pwd: adminUser.password,
- roles: jsTest.adminUserRoles});
+ // add the admin user
+ print("adding user");
+ mongos.getDB(adminUser.db)
+ .createUser(
+ {user: adminUser.username, pwd: adminUser.password, roles: jsTest.adminUserRoles});
-//login as admin user
-login(adminUser);
+ // login as admin user
+ login(adminUser);
-assert.eq(1, st.config.shards.count() , "initial server count wrong");
+ assert.eq(1, st.config.shards.count(), "initial server count wrong");
-//start a mongod with NO keyfile
-var conn = MongoRunner.runMongod({});
-print(conn);
+ // start a mongod with NO keyfile
+ var conn = MongoRunner.runMongod({});
+ print(conn);
-// --------------- Test 1 --------------------
-// Add shard to the existing cluster (should fail because it was added without a keyfile)
-printjson(assert.commandFailed(admin.runCommand({ addShard: conn.host })));
+ // --------------- Test 1 --------------------
+ // Add shard to the existing cluster (should fail because it was added without a keyfile)
+ printjson(assert.commandFailed(admin.runCommand({addShard: conn.host})));
-// stop mongod
-MongoRunner.stopMongod(conn);
+ // stop mongod
+ MongoRunner.stopMongod(conn);
-//--------------- Test 2 --------------------
-//start mongod again, this time with keyfile
-var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1"});
-//try adding the new shard
-assert.commandWorked(admin.runCommand({ addShard: conn.host }));
+ //--------------- Test 2 --------------------
+ // start mongod again, this time with keyfile
+ var conn = MongoRunner.runMongod({keyFile: "jstests/libs/key1"});
+ // try adding the new shard
+ assert.commandWorked(admin.runCommand({addShard: conn.host}));
-//Add some data
-var db = mongos.getDB("foo");
-var collA = mongos.getCollection("foo.bar");
+ // Add some data
+ var db = mongos.getDB("foo");
+ var collA = mongos.getCollection("foo.bar");
-// enable sharding on a collection
-assert.commandWorked(admin.runCommand({ enableSharding: "" + collA.getDB() }));
-st.ensurePrimaryShard("foo", "shard0000");
+ // enable sharding on a collection
+ assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+ st.ensurePrimaryShard("foo", "shard0000");
-assert.commandWorked(admin.runCommand({ shardCollection: "" + collA, key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
-// add data to the sharded collection
-for (var i = 0; i < 4; i++) {
- db.bar.save({ _id: i });
- assert.commandWorked(admin.runCommand({ split: "" + collA, middle: { _id: i } }));
-}
+ // add data to the sharded collection
+ for (var i = 0; i < 4; i++) {
+ db.bar.save({_id: i});
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+ }
-// move a chunk
-assert.commandWorked(admin.runCommand({ moveChunk: "foo.bar", find: { _id: 1 }, to: "shard0001" }));
+ // move a chunk
+ assert.commandWorked(admin.runCommand({moveChunk: "foo.bar", find: {_id: 1}, to: "shard0001"}));
-//verify the chunk was moved
-admin.runCommand({ flushRouterConfig: 1 });
+ // verify the chunk was moved
+ admin.runCommand({flushRouterConfig: 1});
-var config = mongos.getDB("config");
-st.printShardingStatus(true);
+ var config = mongos.getDB("config");
+ st.printShardingStatus(true);
-// start balancer before removing the shard
-st.startBalancer();
+ // start balancer before removing the shard
+ st.startBalancer();
-//--------------- Test 3 --------------------
-// now drain the shard
-assert.commandWorked(admin.runCommand({removeShard: conn.host}));
+ //--------------- Test 3 --------------------
+ // now drain the shard
+ assert.commandWorked(admin.runCommand({removeShard: conn.host}));
-// give it some time to drain
-assert.soon(function() {
- var result = admin.runCommand({removeShard: conn.host});
- printjson(result);
+ // give it some time to drain
+ assert.soon(function() {
+ var result = admin.runCommand({removeShard: conn.host});
+ printjson(result);
- return result.ok && result.state == "completed";
-}, "failed to drain shard completely", 5 * 60 * 1000);
+ return result.ok && result.state == "completed";
+ }, "failed to drain shard completely", 5 * 60 * 1000);
-assert.eq(1, st.config.shards.count() , "removed server still appears in count");
+ assert.eq(1, st.config.shards.count(), "removed server still appears in count");
-MongoRunner.stopMongod(conn);
+ MongoRunner.stopMongod(conn);
-st.stop();
+ st.stop();
})();
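
The drain-and-remove polling idiom from Test 3 above, shown on its own (illustrative only; it
assumes the `admin` DB handle and the extra `conn` mongod from the test): removeShard is issued
once to start draining, then re-issued until it reports completion.

// Hypothetical sketch: `admin` and `conn` are the admin DB handle and the extra mongod above.
assert.commandWorked(admin.runCommand({removeShard: conn.host}));  // starts draining

assert.soon(function() {
    var result = admin.runCommand({removeShard: conn.host});  // polling the same command
    printjson(result);
    return result.ok && result.state == "completed";  // fully drained and removed
}, "failed to drain shard completely", 5 * 60 * 1000);
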
diff --git a/jstests/sharding/auth_copydb.js b/jstests/sharding/auth_copydb.js
index 6ecb45ac201..8c73214019e 100644
--- a/jstests/sharding/auth_copydb.js
+++ b/jstests/sharding/auth_copydb.js
@@ -1,44 +1,41 @@
// Tests the copydb command on mongos with auth
var runTest = function() {
+ var st = new ShardingTest({shards: 1, mongos: 1, keyFile: "jstests/libs/key1"});
+ var mongos = st.s0;
+ var destAdminDB = mongos.getDB('admin');
+ var destTestDB = mongos.getDB('test');
-var st = new ShardingTest({ shards : 1,
- mongos : 1,
- keyFile : "jstests/libs/key1"});
-var mongos = st.s0;
-var destAdminDB = mongos.getDB('admin');
-var destTestDB = mongos.getDB('test');
+ var sourceMongodConn = MongoRunner.runMongod({});
+ var sourceTestDB = sourceMongodConn.getDB('test');
-var sourceMongodConn = MongoRunner.runMongod({});
-var sourceTestDB = sourceMongodConn.getDB('test');
+ sourceTestDB.foo.insert({a: 1});
-sourceTestDB.foo.insert({a:1});
+ destAdminDB.createUser({
+ user: 'admin',
+ pwd: 'password',
+ roles: jsTest.adminUserRoles
+ }); // Turns on access control enforcement
-destAdminDB.createUser({user: 'admin', pwd: 'password', roles: jsTest.adminUserRoles}); // Turns on access control enforcement
+ jsTestLog("Running copydb that should fail");
+ var res = destAdminDB.runCommand(
+ {copydb: 1, fromhost: sourceMongodConn.host, fromdb: 'test', todb: 'test'});
+ printjson(res);
+ assert.commandFailed(res);
-jsTestLog("Running copydb that should fail");
-var res = destAdminDB.runCommand({copydb:1,
- fromhost: sourceMongodConn.host,
- fromdb:'test',
- todb:'test'});
-printjson(res);
-assert.commandFailed(res);
+ destAdminDB.auth('admin', 'password');
+ assert.eq(0, destTestDB.foo.count()); // Be extra sure the copydb didn't secretly succeed.
-destAdminDB.auth('admin', 'password');
-assert.eq(0, destTestDB.foo.count()); // Be extra sure the copydb didn't secretly succeed.
+ jsTestLog("Running copydb that should succeed");
+ res = destAdminDB.runCommand(
+ {copydb: 1, fromhost: sourceMongodConn.host, fromdb: 'test', todb: 'test'});
+ printjson(res);
+ assert.commandWorked(res);
-jsTestLog("Running copydb that should succeed");
-res = destAdminDB.runCommand({copydb:1,
- fromhost: sourceMongodConn.host,
- fromdb:'test',
- todb:'test'});
-printjson(res);
-assert.commandWorked(res);
+ assert.eq(1, destTestDB.foo.count());
+ assert.eq(1, destTestDB.foo.findOne().a);
-assert.eq(1, destTestDB.foo.count());
-assert.eq(1, destTestDB.foo.findOne().a);
-
-st.stop();
+ st.stop();
};
diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js
index 568cbc4a5ac..9e1ddb06873 100644
--- a/jstests/sharding/auth_repl.js
+++ b/jstests/sharding/auth_repl.js
@@ -1,5 +1,5 @@
-var replTest = new ReplSetTest({ nodes: 3, useHostName : false, keyFile: 'jstests/libs/key1' });
-replTest.startSet({ oplogSize: 10 });
+var replTest = new ReplSetTest({nodes: 3, useHostName: false, keyFile: 'jstests/libs/key1'});
+replTest.startSet({oplogSize: 10});
replTest.initiate();
replTest.awaitSecondaryNodes();
@@ -33,7 +33,7 @@ priTestDB.createUser({user: 'a', pwd: 'a', roles: jsTest.basicUserRoles},
assert.eq(1, testDB.auth('a', 'a'));
jsTest.log('Sending an authorized query that should be ok');
-assert.writeOK(testColl.insert({ x: 1 }, { writeConcern: { w: nodeCount }}));
+assert.writeOK(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}}));
conn.setSlaveOk(true);
doc = testColl.findOne();
@@ -114,10 +114,10 @@ for (var x = 0; x < nodeCount; x++) {
}
}
-assert(secNodeIdx >= 0); // test sanity check
+assert(secNodeIdx >= 0); // test sanity check
// Kill the cached secondary
-replTest.stop(secNodeIdx, 15, { auth: { user: 'user', pwd: 'user' }});
+replTest.stop(secNodeIdx, 15, {auth: {user: 'user', pwd: 'user'}});
assert(testDB.logout().ok);
@@ -129,4 +129,3 @@ queryToSecShouldFail();
queryToPriShouldFail();
replTest.stopSet();
-
diff --git a/jstests/sharding/auth_slaveok_routing.js b/jstests/sharding/auth_slaveok_routing.js
index 599aed242b5..a01314fe405 100644
--- a/jstests/sharding/auth_slaveok_routing.js
+++ b/jstests/sharding/auth_slaveok_routing.js
@@ -18,25 +18,27 @@
*
* @return {boolean} true if query was routed to a secondary node.
*/
-function doesRouteToSec( coll, query ) {
- var explain = coll.find( query ).explain();
+function doesRouteToSec(coll, query) {
+ var explain = coll.find(query).explain();
assert.eq("SINGLE_SHARD", explain.queryPlanner.winningPlan.stage);
var serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- var conn = new Mongo( serverInfo.host + ":" + serverInfo.port.toString());
- var cmdRes = conn.getDB( 'admin' ).runCommand({ isMaster: 1 });
+ var conn = new Mongo(serverInfo.host + ":" + serverInfo.port.toString());
+ var cmdRes = conn.getDB('admin').runCommand({isMaster: 1});
jsTest.log('isMaster: ' + tojson(cmdRes));
return cmdRes.secondary;
}
-var rsOpts = { oplogSize: 50 };
-var st = new ShardingTest({ keyFile: 'jstests/libs/key1', shards: 1,
- rs: rsOpts, other: { nopreallocj: 1 }});
+var rsOpts = {
+ oplogSize: 50
+};
+var st = new ShardingTest(
+ {keyFile: 'jstests/libs/key1', shards: 1, rs: rsOpts, other: {nopreallocj: 1}});
var mongos = st.s;
var replTest = st.rs0;
-var testDB = mongos.getDB( 'AAAAA' );
+var testDB = mongos.getDB('AAAAA');
var coll = testDB.user;
var nodeCount = replTest.nodes.length;
@@ -45,69 +47,65 @@ var nodeCount = replTest.nodes.length;
* connections to access the server from localhost connections if there
* is no admin user.
*/
-var adminDB = mongos.getDB( 'admin' );
+var adminDB = mongos.getDB('admin');
adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles});
-adminDB.auth( 'user', 'password' );
-var priAdminDB = replTest.getPrimary().getDB( 'admin' );
-priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
+adminDB.auth('user', 'password');
+var priAdminDB = replTest.getPrimary().getDB('admin');
+priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles},
{w: 3, wtimeout: 30000});
coll.drop();
-coll.setSlaveOk( true );
+coll.setSlaveOk(true);
/* Secondaries should be up here, but they can still be in RECOVERY
* state, which will make the ReplicaSetMonitor mark them as
* ok = false and not eligible for slaveOk queries.
*/
-ReplSetTest.awaitRSClientHosts( mongos, replTest.getSecondaries(),
- { ok : true, secondary : true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
var bulk = coll.initializeUnorderedBulkOp();
-for ( var x = 0; x < 20; x++ ) {
- bulk.insert({ v: x, k: 10 });
+for (var x = 0; x < 20; x++) {
+ bulk.insert({v: x, k: 10});
}
-assert.writeOK(bulk.execute({ w: nodeCount }));
+assert.writeOK(bulk.execute({w: nodeCount}));
/* Although mongos never caches query results, try to do a different query
 * every time just to be sure.
*/
var vToFind = 0;
-jsTest.log( 'First query to SEC' );
-assert( doesRouteToSec( coll, { v: vToFind++ }));
+jsTest.log('First query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
var SIG_TERM = 15;
-replTest.stopSet( SIG_TERM, true, { auth: { user: 'user', pwd: 'password' }});
+replTest.stopSet(SIG_TERM, true, {auth: {user: 'user', pwd: 'password'}});
-for ( var n = 0; n < nodeCount; n++ ) {
- replTest.restart( n, rsOpts );
+for (var n = 0; n < nodeCount; n++) {
+ replTest.restart(n, rsOpts);
}
replTest.awaitSecondaryNodes();
-coll.setSlaveOk( true );
+coll.setSlaveOk(true);
/* replSetMonitor does not refresh the nodes information when getting secondaries.
* A node that is previously labeled as secondary can now be a primary, so we
* wait for the replSetMonitorWatcher thread to refresh the nodes information.
*/
-ReplSetTest.awaitRSClientHosts( mongos, replTest.getSecondaries(),
- { ok : true, secondary : true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.getSecondaries(), {ok: true, secondary: true});
//
-// We also need to wait for the primary, it's possible that the mongos may think a node is a
+// We also need to wait for the primary, it's possible that the mongos may think a node is a
// secondary but it actually changed to a primary before we send our final query.
//
-ReplSetTest.awaitRSClientHosts( mongos, replTest.getPrimary(),
- { ok : true, ismaster : true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
// Recheck if we can still query secondaries after refreshing connections.
-jsTest.log( 'Final query to SEC' );
-assert( doesRouteToSec( coll, { v: vToFind++ }));
+jsTest.log('Final query to SEC');
+assert(doesRouteToSec(coll, {v: vToFind++}));
// Cleanup auth so Windows will be able to shutdown gracefully
-priAdminDB = replTest.getPrimary().getDB( 'admin' );
-priAdminDB.auth( 'user', 'password' );
-priAdminDB.dropUser( 'user' );
+priAdminDB = replTest.getPrimary().getDB('admin');
+priAdminDB.auth('user', 'password');
+priAdminDB.dropUser('user');
st.stop();
-
diff --git a/jstests/sharding/authmr.js b/jstests/sharding/authmr.js
index c827b4948b3..6484c729474 100644
--- a/jstests/sharding/authmr.js
+++ b/jstests/sharding/authmr.js
@@ -3,118 +3,117 @@
(function() {
-//
-// User document declarations. All users in this test are added to the admin database.
-//
-
-var adminUser = {
- user: "admin",
- pwd: "a",
- roles: [ "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin" ]
-};
-
-var test1User = {
- user: "test",
- pwd: "a",
- roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
-};
-
-function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
-}
-
-function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
-}
-
-var cluster = new ShardingTest({ name: "authmr",
- shards: 1,
- mongos: 1,
- other: {
- extraOptions: { keyFile: "jstests/libs/key1" }
- }
- });
-
-// Set up the test data.
-(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
-
- adminDB.dropUser(test1User.user);
- adminDB.createUser(test1User);
-
- assertInsert(test1DB.foo, { a: 1 });
- assertInsert(test1DB.foo, { a: 2 });
- assertInsert(test1DB.foo, { a: 3 });
- assertInsert(test1DB.foo, { a: 4 });
- assertInsert(test2DB.foo, { x: 1 });
- }
- finally {
- adminDB.logout();
- }
-}());
-
-assert.throws(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1User.user, test1User.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
-
- // Sanity check. test1User can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 4);
- assert.throws(test2DB.foo.count);
-
- test1DB.foo.mapReduce(
- function () {
- emit(0, this.a);
- var t2 = new Mongo().getDB("test2");
- t2.ad.insert(this);
- },
- function (k, vs) {
- var t2 = new Mongo().getDB("test2");
- t2.reductio.insert(this);
-
- return Array.sum(vs);
- },
- { out: "bar",
- finalize: function (k, v) {
- for (k in this) {
- if (this.hasOwnProperty(k))
- print(k + "=" + v);
- }
- var t2 = new Mongo().getDB("test2");
- t2.absurdum.insert({ key: k, value: v });
- }
- });
+ //
+ // User document declarations. All users in this test are added to the admin database.
+ //
+
+ var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: [
+ "readWriteAnyDatabase",
+ "dbAdminAnyDatabase",
+ "userAdminAnyDatabase",
+ "clusterAdmin"
+ ]
+ };
+
+ var test1User = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'readWrite', db: 'test1', hasRole: true, canDelegate: false}]
+ };
+
+ function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
}
- finally {
- adminDB.logout();
- }
-});
-(function() {
- var adminDB = cluster.getDB('admin');
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
- try {
- var test2DB = cluster.getDB('test2');
- assert.eq(test2DB.reductio.count(), 0, "reductio");
- assert.eq(test2DB.ad.count(), 0, "ad");
- assert.eq(test2DB.absurdum.count(), 0, "absurdum");
- }
- finally {
- adminDB.logout();
+ function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
}
-}());
+
+ var cluster = new ShardingTest({
+ name: "authmr",
+ shards: 1,
+ mongos: 1,
+ other: {extraOptions: {keyFile: "jstests/libs/key1"}}
+ });
+
+ // Set up the test data.
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB = adminDB.getSiblingDB('test1');
+ var test2DB = adminDB.getSiblingDB('test2');
+ var ex;
+ try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
+
+ adminDB.dropUser(test1User.user);
+ adminDB.createUser(test1User);
+
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test1DB.foo, {a: 2});
+ assertInsert(test1DB.foo, {a: 3});
+ assertInsert(test1DB.foo, {a: 4});
+ assertInsert(test2DB.foo, {x: 1});
+ } finally {
+ adminDB.logout();
+ }
+ }());
+
+ assert.throws(function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB;
+ var test2DB;
+ assert(adminDB.auth(test1User.user, test1User.pwd));
+ try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
+
+ // Sanity check. test1User can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 4);
+ assert.throws(test2DB.foo.count);
+
+ test1DB.foo.mapReduce(
+ function() {
+ emit(0, this.a);
+ var t2 = new Mongo().getDB("test2");
+ t2.ad.insert(this);
+ },
+ function(k, vs) {
+ var t2 = new Mongo().getDB("test2");
+ t2.reductio.insert(this);
+
+ return Array.sum(vs);
+ },
+ {
+ out: "bar",
+ finalize: function(k, v) {
+ for (k in this) {
+ if (this.hasOwnProperty(k))
+ print(k + "=" + v);
+ }
+ var t2 = new Mongo().getDB("test2");
+ t2.absurdum.insert({key: k, value: v});
+ }
+ });
+ } finally {
+ adminDB.logout();
+ }
+ });
+
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
+ try {
+ var test2DB = cluster.getDB('test2');
+ assert.eq(test2DB.reductio.count(), 0, "reductio");
+ assert.eq(test2DB.ad.count(), 0, "ad");
+ assert.eq(test2DB.absurdum.count(), 0, "absurdum");
+ } finally {
+ adminDB.logout();
+ }
+ }());
})();
diff --git a/jstests/sharding/authwhere.js b/jstests/sharding/authwhere.js
index 3d3d0d8a605..df27078784b 100644
--- a/jstests/sharding/authwhere.js
+++ b/jstests/sharding/authwhere.js
@@ -3,85 +3,84 @@
(function() {
-//
-// User document declarations. All users in this test are added to the admin database.
-//
+ //
+ // User document declarations. All users in this test are added to the admin database.
+ //
-var adminUser = {
- user: "admin",
- pwd: "a",
- roles: [ "readWriteAnyDatabase",
- "dbAdminAnyDatabase",
- "userAdminAnyDatabase",
- "clusterAdmin" ]
-};
+ var adminUser = {
+ user: "admin",
+ pwd: "a",
+ roles: [
+ "readWriteAnyDatabase",
+ "dbAdminAnyDatabase",
+ "userAdminAnyDatabase",
+ "clusterAdmin"
+ ]
+ };
-var test1Reader = {
- user: "test",
- pwd: "a",
- roles: [{role: 'read', db: 'test1', hasRole:true, canDelegate: false}]
-};
+ var test1Reader = {
+ user: "test",
+ pwd: "a",
+ roles: [{role: 'read', db: 'test1', hasRole: true, canDelegate: false}]
+ };
-function assertRemove(collection, pattern) {
- assert.writeOK(collection.remove(pattern));
-}
+ function assertRemove(collection, pattern) {
+ assert.writeOK(collection.remove(pattern));
+ }
-function assertInsert(collection, obj) {
- assert.writeOK(collection.insert(obj));
-}
+ function assertInsert(collection, obj) {
+ assert.writeOK(collection.insert(obj));
+ }
-var cluster = new ShardingTest({ name: "authwhere",
- shards: 1,
- mongos: 1,
- other: {
- extraOptions: { keyFile: "jstests/libs/key1" }
- }
- });
+ var cluster = new ShardingTest({
+ name: "authwhere",
+ shards: 1,
+ mongos: 1,
+ other: {extraOptions: {keyFile: "jstests/libs/key1"}}
+ });
-// Set up the test data.
-(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB = adminDB.getSiblingDB('test1');
- var test2DB = adminDB.getSiblingDB('test2');
- var ex;
- try {
- adminDB.createUser(adminUser);
- assert(adminDB.auth(adminUser.user, adminUser.pwd));
+ // Set up the test data.
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB = adminDB.getSiblingDB('test1');
+ var test2DB = adminDB.getSiblingDB('test2');
+ var ex;
+ try {
+ adminDB.createUser(adminUser);
+ assert(adminDB.auth(adminUser.user, adminUser.pwd));
- adminDB.dropUser(test1Reader.user);
- adminDB.createUser(test1Reader);
+ adminDB.dropUser(test1Reader.user);
+ adminDB.createUser(test1Reader);
- assertInsert(test1DB.foo, { a: 1 });
- assertInsert(test2DB.foo, { x: 1 });
- }
- finally {
- adminDB.logout();
- }
-}());
+ assertInsert(test1DB.foo, {a: 1});
+ assertInsert(test2DB.foo, {x: 1});
+ } finally {
+ adminDB.logout();
+ }
+ }());
-(function() {
- var adminDB = cluster.getDB('admin');
- var test1DB;
- var test2DB;
- assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
- try {
- test1DB = adminDB.getSiblingDB("test1");
- test2DB = adminDB.getSiblingDB("test2");
+ (function() {
+ var adminDB = cluster.getDB('admin');
+ var test1DB;
+ var test2DB;
+ assert(adminDB.auth(test1Reader.user, test1Reader.pwd));
+ try {
+ test1DB = adminDB.getSiblingDB("test1");
+ test2DB = adminDB.getSiblingDB("test2");
- // Sanity check. test1Reader can count (read) test1, but not test2.
- assert.eq(test1DB.foo.count(), 1);
- assert.throws(test2DB.foo.count);
+ // Sanity check. test1Reader can count (read) test1, but not test2.
+ assert.eq(test1DB.foo.count(), 1);
+ assert.throws(test2DB.foo.count);
- // Cannot examine second database from a where clause.
- assert.throws(test1DB.foo.count, ["db.getSiblingDB('test2').foo.count() == 1"]);
+ // Cannot examine second database from a where clause.
+ assert.throws(test1DB.foo.count, ["db.getSiblingDB('test2').foo.count() == 1"]);
- // Cannot write test1 via tricky where clause.
- assert.throws(test1DB.foo.count, ["db.foo.insert({b: 1})"]);
- assert.eq(test1DB.foo.count(), 1);
- }
- finally {
- adminDB.logout();
- }
-}());
+ // Cannot write test1 via tricky where clause.
+ assert.throws(test1DB.foo.count, ["db.foo.insert({b: 1})"]);
+ assert.eq(test1DB.foo.count(), 1);
+ } finally {
+ adminDB.logout();
+ }
+ }());
})();
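
A side note on the string-query form exercised in the hunk above (illustrative only, not part of the patch): the legacy mongo shell converts a plain string passed to find()/count() into a $where clause, which is why passing a JavaScript expression as the argument is enough to exercise the $where authorization checks. A minimal sketch, assuming a collection named foo:

    // These two calls are effectively equivalent in the legacy shell:
    db.foo.count("this.a == 1");            // string query becomes a $where clause
    db.foo.count({$where: "this.a == 1"});  // explicit form of the same query
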
diff --git a/jstests/sharding/auto1.js b/jstests/sharding/auto1.js
index 214e5aae1ed..3fbcef78e82 100644
--- a/jstests/sharding/auto1.js
+++ b/jstests/sharding/auto1.js
@@ -1,80 +1,79 @@
(function() {
-var s = new ShardingTest({ name: "auto1",
- shards: 2,
- mongos: 1,
- other: { enableBalancer : 1 } });
+ var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1, other: {enableBalancer: 1}});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-bigString = "";
-while ( bigString.length < 1024 * 50 )
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+ bigString = "";
+ while (bigString.length < 1024 * 50)
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-db = s.getDB( "test" );
-coll = db.foo;
+ db = s.getDB("test");
+ coll = db.foo;
-var i=0;
+ var i = 0;
-var bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<100; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 100; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
+ primary = s.getPrimaryShard("test").getDB("test");
-counts = [];
+ counts = [];
-s.printChunks();
-counts.push( s.config.chunks.count() );
-assert.eq(100, db.foo.find().itcount());
+ s.printChunks();
+ counts.push(s.config.chunks.count());
+ assert.eq(100, db.foo.find().itcount());
-print( "datasize: " + tojson( s.getPrimaryShard( "test" ).getDB( "admin" )
- .runCommand( { datasize : "test.foo" } ) ) );
+ print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
-bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<200; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 200; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.printChunks();
-s.printChangeLog();
-counts.push( s.config.chunks.count() );
+ s.printChunks();
+ s.printChangeLog();
+ counts.push(s.config.chunks.count());
-bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<400; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 400; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.printChunks();
-s.printChangeLog();
-counts.push( s.config.chunks.count() );
+ s.printChunks();
+ s.printChangeLog();
+ counts.push(s.config.chunks.count());
-bulk = coll.initializeUnorderedBulkOp();
-for ( ; i<700; i++ ){
- bulk.insert( { num : i , s : bigString } );
-}
-assert.writeOK( bulk.execute() );
+ bulk = coll.initializeUnorderedBulkOp();
+ for (; i < 700; i++) {
+ bulk.insert({num: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.printChunks();
-s.printChangeLog();
-counts.push( s.config.chunks.count() );
+ s.printChunks();
+ s.printChangeLog();
+ counts.push(s.config.chunks.count());
-assert( counts[counts.length-1] > counts[0] , "counts 1 : " + tojson( counts ) );
-sorted = counts.slice(0);
-// Sort doesn't sort numbers correctly by default, resulting in fail
-sorted.sort( function(a, b){ return a - b; } );
-assert.eq( counts , sorted , "counts 2 : " + tojson( counts ) );
+ assert(counts[counts.length - 1] > counts[0], "counts 1 : " + tojson(counts));
+ sorted = counts.slice(0);
+    // Sort doesn't sort numbers correctly by default, resulting in failure
+ sorted.sort(function(a, b) {
+ return a - b;
+ });
+ assert.eq(counts, sorted, "counts 2 : " + tojson(counts));
-print( counts );
+ print(counts);
-printjson( db.stats() );
+ printjson(db.stats());
-s.stop();
+ s.stop();
})();
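
One detail worth spelling out from the hunk above (illustrative only, not part of the patch): JavaScript's Array.prototype.sort compares elements as strings unless given a comparator, which is why the chunk counts are sorted with an explicit numeric comparator before the equality assertion. A minimal sketch with made-up counts:

    // Default sort is lexicographic, so numbers come out in string order.
    var counts = [2, 10, 7];
    print(counts.slice(0).sort());        // 10,2,7 -- string order, would fail the check
    print(counts.slice(0).sort(function(a, b) {
        return a - b;                     // 2,7,10 -- numeric order
    }));
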
diff --git a/jstests/sharding/auto2.js b/jstests/sharding/auto2.js
index 81f0c1f17ea..3d21559f8d6 100644
--- a/jstests/sharding/auto2.js
+++ b/jstests/sharding/auto2.js
@@ -1,151 +1,152 @@
(function() {
-var s = new ShardingTest({ name: "auto2",
- shards: 2,
- mongos: 2 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-
-var bigString = "";
-while (bigString.length < 1024 * 50) {
- bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
-}
-
-var db = s.getDB("test" );
-var coll = db.foo;
-
-var i = 0;
-for (var j = 0; j < 30; j++) {
- print("j:" + j + " : " +
- Date.timeFunc(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var k = 0; k < 100; k++) {
- bulk.insert({ num : i, s : bigString });
- i++;
- }
- assert.writeOK(bulk.execute());
- }));
-}
+ var s = new ShardingTest({name: "auto2", shards: 2, mongos: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-s.startBalancer();
+ var bigString = "";
+ while (bigString.length < 1024 * 50) {
+ bigString += "asocsancdnsjfnsdnfsjdhfasdfasdfasdfnsadofnsadlkfnsaldknfsad";
+ }
-assert.eq( i , j * 100 , "setup" );
+ var db = s.getDB("test");
+ var coll = db.foo;
+
+ var i = 0;
+ for (var j = 0; j < 30; j++) {
+ print("j:" + j + " : " +
+ Date.timeFunc(function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var k = 0; k < 100; k++) {
+ bulk.insert({num: i, s: bigString});
+ i++;
+ }
+ assert.writeOK(bulk.execute());
+ }));
+ }
-// Until SERVER-9715 is fixed, the sync command must be run on a diff connection
-new Mongo(s.s.host).adminCommand("connpoolsync");
+ s.startBalancer();
-print("done inserting data" );
+ assert.eq(i, j * 100, "setup");
-print("datasize: " + tojson( s.getPrimaryShard("test" ).getDB("admin" )
- .runCommand( { datasize : "test.foo" } ) ) );
-s.printChunks();
+    // Until SERVER-9715 is fixed, the sync command must be run on a different connection
+ new Mongo(s.s.host).adminCommand("connpoolsync");
-function doCountsGlobal(){
- counta = s._connections[0].getDB("test" ).foo.count();
- countb = s._connections[1].getDB("test" ).foo.count();
- return counta + countb;
-}
+ print("done inserting data");
-// Wait for the chunks to distribute
-assert.soon( function(){
- doCountsGlobal();
- print("Counts: " + counta + countb);
+ print("datasize: " +
+ tojson(s.getPrimaryShard("test").getDB("admin").runCommand({datasize: "test.foo"})));
+ s.printChunks();
- return counta > 0 && countb > 0;
-});
+ function doCountsGlobal() {
+ counta = s._connections[0].getDB("test").foo.count();
+ countb = s._connections[1].getDB("test").foo.count();
+ return counta + countb;
+ }
+ // Wait for the chunks to distribute
+ assert.soon(function() {
+ doCountsGlobal();
+ print("Counts: " + counta + countb);
-print("checkpoint B" );
+ return counta > 0 && countb > 0;
+ });
-var missing = [];
+ print("checkpoint B");
-for ( i=0; i<j*100; i++ ){
- var x = coll.findOne( { num : i } );
- if ( ! x ){
- missing.push( i );
- print("can't find: " + i );
- sleep( 5000 );
- x = coll.findOne( { num : i } );
- if ( ! x ){
- print("still can't find: " + i );
+ var missing = [];
- for ( var zzz=0; zzz<s._connections.length; zzz++ ){
- if ( s._connections[zzz].getDB("test" ).foo.findOne( { num : i } ) ){
- print("found on wrong server: " + s._connections[zzz] );
+ for (i = 0; i < j * 100; i++) {
+ var x = coll.findOne({num: i});
+ if (!x) {
+ missing.push(i);
+ print("can't find: " + i);
+ sleep(5000);
+ x = coll.findOne({num: i});
+ if (!x) {
+ print("still can't find: " + i);
+
+ for (var zzz = 0; zzz < s._connections.length; zzz++) {
+ if (s._connections[zzz].getDB("test").foo.findOne({num: i})) {
+ print("found on wrong server: " + s._connections[zzz]);
+ }
}
}
-
}
}
-}
-
-s.printChangeLog();
-
-print("missing: " + tojson( missing ) );
-assert.soon( function(z){ return doCountsGlobal() == j * 100; } , "from each a:" + counta + " b:" + countb + " i:" + i );
-print("checkpoint B.a" );
-s.printChunks();
-assert.eq( j * 100 , coll.find().limit(100000000).itcount() , "itcount A" );
-assert.eq( j * 100 , counta + countb , "from each 2 a:" + counta + " b:" + countb + " i:" + i );
-assert( missing.length == 0 , "missing : " + tojson( missing ) );
-
-print("checkpoint C" );
-
-assert( Array.unique( s.config.chunks.find().toArray().map( function(z){ return z.shard; } ) ).length == 2 , "should be using both servers" );
-
-for ( i=0; i<100; i++ ){
- cursor = coll.find().batchSize(5);
- cursor.next();
- cursor = null;
- gc();
-}
-
-print("checkpoint D");
-
-// test not-sharded cursors
-db = s.getDB("test2" );
-t = db.foobar;
-for ( i =0; i<100; i++ )
- t.save( { _id : i } );
-for ( i=0; i<100; i++ ){
- t.find().batchSize( 2 ).next();
- assert.lt(0 , db.serverStatus().metrics.cursor.open.total, "cursor1");
- gc();
-}
-
-for ( i=0; i<100; i++ ){
- gc();
-}
-assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
-
-// Stop the balancer, otherwise it may grab some connections from the pool for itself
-s.stopBalancer();
-
-print("checkpoint E");
-
-assert( t.findOne() , "check close 0" );
-
-for (i = 0; i < 20; i++) {
- var conn = new Mongo( db.getMongo().host );
- temp2 = conn.getDB("test2" ).foobar;
- assert.eq( conn._fullNameSpace , t._fullNameSpace , "check close 1" );
- assert( temp2.findOne() , "check close 2" );
- conn = null;
- gc();
-}
-
-print("checkpoint F");
-
-assert.throws(function() {
- s.getDB("test" ).foo.find().sort({ s : 1 }).forEach(function(x) {
- printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
+
+ s.printChangeLog();
+
+ print("missing: " + tojson(missing));
+ assert.soon(function(z) {
+ return doCountsGlobal() == j * 100;
+ }, "from each a:" + counta + " b:" + countb + " i:" + i);
+ print("checkpoint B.a");
+ s.printChunks();
+ assert.eq(j * 100, coll.find().limit(100000000).itcount(), "itcount A");
+ assert.eq(j * 100, counta + countb, "from each 2 a:" + counta + " b:" + countb + " i:" + i);
+ assert(missing.length == 0, "missing : " + tojson(missing));
+
+ print("checkpoint C");
+
+ assert(Array.unique(s.config.chunks.find().toArray().map(function(z) {
+ return z.shard;
+ })).length == 2,
+ "should be using both servers");
+
+ for (i = 0; i < 100; i++) {
+ cursor = coll.find().batchSize(5);
+ cursor.next();
+ cursor = null;
+ gc();
+ }
+
+ print("checkpoint D");
+
+    // Test non-sharded cursors
+ db = s.getDB("test2");
+ t = db.foobar;
+ for (i = 0; i < 100; i++)
+ t.save({_id: i});
+ for (i = 0; i < 100; i++) {
+ t.find().batchSize(2).next();
+ assert.lt(0, db.serverStatus().metrics.cursor.open.total, "cursor1");
+ gc();
+ }
+
+ for (i = 0; i < 100; i++) {
+ gc();
+ }
+ assert.eq(0, db.serverStatus().metrics.cursor.open.total, "cursor2");
+
+    // Stop the balancer; otherwise it may grab some connections from the pool for itself
+ s.stopBalancer();
+
+ print("checkpoint E");
+
+ assert(t.findOne(), "check close 0");
+
+ for (i = 0; i < 20; i++) {
+ var conn = new Mongo(db.getMongo().host);
+ temp2 = conn.getDB("test2").foobar;
+ assert.eq(conn._fullNameSpace, t._fullNameSpace, "check close 1");
+ assert(temp2.findOne(), "check close 2");
+ conn = null;
+ gc();
+ }
+
+ print("checkpoint F");
+
+ assert.throws(function() {
+ s.getDB("test").foo.find().sort({s: 1}).forEach(function(x) {
+ printjsononeline(x.substring(0, x.length > 30 ? 30 : x.length));
+ });
});
-});
-print("checkpoint G");
+ print("checkpoint G");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/auto_rebalance.js b/jstests/sharding/auto_rebalance.js
index f994404ccc5..826e979d68b 100644
--- a/jstests/sharding/auto_rebalance.js
+++ b/jstests/sharding/auto_rebalance.js
@@ -2,56 +2,46 @@
// shards
(function() {
-'use strict';
-
-var st = new ShardingTest({ name: 'auto_rebalance_rs',
- mongos: 1,
- shards: 2,
- chunksize: 1,
- rs: {
- nodes: 3
- }
- });
-
-assert.writeOK(st.getDB( "config" ).settings.update(
- { _id: "balancer" },
- { $set: { "_secondaryThrottle" : false } },
- { upsert: true }));
-
-st.getDB("admin").runCommand({enableSharding : "TestDB_auto_rebalance_rs"});
-st.getDB("admin").runCommand({shardCollection : "TestDB_auto_rebalance_rs.foo", key : {x : 1}});
-
-var dbTest = st.getDB("TestDB_auto_rebalance_rs");
-
-var num = 100000;
-var bulk = dbTest.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < num; i++) {
- bulk.insert({ _id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market" });
-}
-assert.writeOK(bulk.execute());
-
-// Wait for the rebalancing to kick in
-st.startBalancer(60000);
-
-assert.soon(function() {
- var s1Chunks = st.getDB("config").chunks.count({shard : "auto_rebalance_rs-rs0"});
- var s2Chunks = st.getDB("config").chunks.count({shard : "auto_rebalance_rs-rs1"});
- var total = st.getDB("config").chunks.count({ns : "TestDB_auto_rebalance_rs.foo"});
-
- print("chunks: " + s1Chunks + " " + s2Chunks + " " + total);
-
- return s1Chunks > 0 && s2Chunks > 0 && (s1Chunks + s2Chunks == total);
- },
- "Chunks failed to balance",
- 60000,
- 5000);
-
-// Ensure the range deleter quiesces
-st.rs0.awaitReplication(120000);
-st.rs1.awaitReplication(120000);
-
-// TODO: mongod only exits with MongoRunner.EXIT_ABRUPT in sharding_legacy_op_query_WT
-// this should be fixed by SERVER-22176
-st.stop({ allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+ 'use strict';
+
+ var st = new ShardingTest(
+ {name: 'auto_rebalance_rs', mongos: 1, shards: 2, chunksize: 1, rs: {nodes: 3}});
+
+ assert.writeOK(st.getDB("config").settings.update(
+ {_id: "balancer"}, {$set: {"_secondaryThrottle": false}}, {upsert: true}));
+
+ st.getDB("admin").runCommand({enableSharding: "TestDB_auto_rebalance_rs"});
+ st.getDB("admin").runCommand({shardCollection: "TestDB_auto_rebalance_rs.foo", key: {x: 1}});
+
+ var dbTest = st.getDB("TestDB_auto_rebalance_rs");
+
+ var num = 100000;
+ var bulk = dbTest.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < num; i++) {
+ bulk.insert(
+ {_id: i, x: i, abc: "defg", date: new Date(), str: "all the talk on the market"});
+ }
+ assert.writeOK(bulk.execute());
+
+ // Wait for the rebalancing to kick in
+ st.startBalancer(60000);
+
+ assert.soon(function() {
+ var s1Chunks = st.getDB("config").chunks.count({shard: "auto_rebalance_rs-rs0"});
+ var s2Chunks = st.getDB("config").chunks.count({shard: "auto_rebalance_rs-rs1"});
+ var total = st.getDB("config").chunks.count({ns: "TestDB_auto_rebalance_rs.foo"});
+
+ print("chunks: " + s1Chunks + " " + s2Chunks + " " + total);
+
+ return s1Chunks > 0 && s2Chunks > 0 && (s1Chunks + s2Chunks == total);
+ }, "Chunks failed to balance", 60000, 5000);
+
+ // Ensure the range deleter quiesces
+ st.rs0.awaitReplication(120000);
+ st.rs1.awaitReplication(120000);
+
+ // TODO: mongod only exits with MongoRunner.EXIT_ABRUPT in sharding_legacy_op_query_WT
+ // this should be fixed by SERVER-22176
+ st.stop({allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
})();
diff --git a/jstests/sharding/autodiscover_config_rs_from_secondary.js b/jstests/sharding/autodiscover_config_rs_from_secondary.js
index d0c4e84d8bd..5d7aa1f7dca 100644
--- a/jstests/sharding/autodiscover_config_rs_from_secondary.js
+++ b/jstests/sharding/autodiscover_config_rs_from_secondary.js
@@ -3,44 +3,44 @@
(function() {
-'use strict';
-
-var rst = new ReplSetTest({name : "configRS",
- nodes: 3,
- nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
-rst.startSet();
-var conf = rst.getReplSetConfig();
-conf.members[1].priority = 0;
-conf.members[2].priority = 0;
-rst.initiate(conf);
-
-var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
-{
- // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
- // perform writes to the config servers.
+ 'use strict';
+
+ var rst = new ReplSetTest(
+ {name: "configRS", nodes: 3, nodeOptions: {configsvr: "", storageEngine: "wiredTiger"}});
+ rst.startSet();
+ var conf = rst.getReplSetConfig();
+ conf.members[1].priority = 0;
+ conf.members[2].priority = 0;
+ rst.initiate(conf);
+
+ var seedList = rst.name + "/" + rst.nodes[1].host; // node 1 is guaranteed to not be primary
+ {
+ // Ensure that mongos can start up when given the CSRS secondary, discover the primary, and
+ // perform writes to the config servers.
+ var mongos = MongoRunner.runMongos({configdb: seedList});
+ var admin = mongos.getDB('admin');
+ assert.writeOK(admin.foo.insert({a: 1}));
+ assert.eq(1, admin.foo.findOne().a);
+ MongoRunner.stopMongos(mongos);
+ }
+
+ // Wait for replication to all config server replica set members to ensure that mongos
+ // doesn't read from a stale config server when trying to verify if the initial cluster metadata
+ // has been properly written.
+ rst.awaitReplication();
+ // Now take down the one electable node
+ rst.stop(0);
+ rst.awaitNoPrimary();
+
+ // Start a mongos when there is no primary
var mongos = MongoRunner.runMongos({configdb: seedList});
+    // Take down the one node the mongos knew about to ensure that it autodiscovered the one
+    // remaining config server.
+ rst.stop(1);
+
var admin = mongos.getDB('admin');
- assert.writeOK(admin.foo.insert({a:1}));
+ mongos.setSlaveOk(true);
assert.eq(1, admin.foo.findOne().a);
- MongoRunner.stopMongos(mongos);
-}
-
-// Wait for replication to all config server replica set members to ensure that mongos
-// doesn't read from a stale config server when trying to verify if the initial cluster metadata
-// has been properly written.
-rst.awaitReplication();
-// Now take down the one electable node
-rst.stop(0);
-rst.awaitNoPrimary();
-
-// Start a mongos when there is no primary
-var mongos = MongoRunner.runMongos({configdb: seedList});
-// Take down the one node the mongos knew about to ensure that it autodiscovered the one remaining
-// config server
-rst.stop(1);
-
-var admin = mongos.getDB('admin');
-mongos.setSlaveOk(true);
-assert.eq(1, admin.foo.findOne().a);
})();
diff --git a/jstests/sharding/autosplit_heuristics.js b/jstests/sharding/autosplit_heuristics.js
index cb5e1260cb9..c4d415ce0de 100644
--- a/jstests/sharding/autosplit_heuristics.js
+++ b/jstests/sharding/autosplit_heuristics.js
@@ -3,83 +3,81 @@
// works as expected even after splitting.
//
-var st = new ShardingTest({ shards : 1,
- mongos : 1,
- other : { mongosOptions : { chunkSize : 1, verbose : 2 }}});
+var st =
+ new ShardingTest({shards: 1, mongos: 1, other: {mongosOptions: {chunkSize: 1, verbose: 2}}});
-// The balancer is by default stopped, thus it will NOT interfere unpredictably with the chunk
+// The balancer is stopped by default, so it will NOT interfere unpredictably with the chunk
// moves/splits depending on the timing.
// Test is not valid for debug build, heuristics get all mangled by debug reload behavior
-var isDebugBuild = st.s0.getDB( "admin" ).serverBuildInfo().debug;
+var isDebugBuild = st.s0.getDB("admin").serverBuildInfo().debug;
-if ( !isDebugBuild ) {
+if (!isDebugBuild) {
+ var mongos = st.s0;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.hashBar");
-var mongos = st.s0;
-var config = mongos.getDB("config");
-var admin = mongos.getDB("admin");
-var coll = mongos.getCollection("foo.hashBar");
+ printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-printjson(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-printjson(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+ var numChunks = 10;
-var numChunks = 10;
+ // Split off the low and high chunks, to get non-special-case behavior
+ printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+ printjson(admin.runCommand({split: coll + "", middle: {_id: numChunks + 1}}));
-// Split off the low and high chunks, to get non-special-case behavior
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
-printjson( admin.runCommand({ split : coll + "", middle : { _id : numChunks + 1 } }) );
+ // Split all the other chunks, and an extra chunk
+ // We need the extra chunk to compensate for the fact that the chunk differ resets the highest
+ // chunk's (i.e. the last-split-chunk's) data count on reload.
+ for (var i = 1; i < numChunks + 1; i++) {
+ printjson(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ }
-// Split all the other chunks, and an extra chunk
-// We need the extra chunk to compensate for the fact that the chunk differ resets the highest
-// chunk's (i.e. the last-split-chunk's) data count on reload.
-for (var i = 1; i < numChunks + 1; i++) {
- printjson( admin.runCommand({ split : coll + "", middle : { _id : i } }) );
-}
-
-jsTest.log("Setup collection...");
-st.printShardingStatus(true);
+ jsTest.log("Setup collection...");
+ st.printShardingStatus(true);
-var approxSize = Object.bsonsize({ _id : 0.0 });
+ var approxSize = Object.bsonsize({_id: 0.0});
-jsTest.log("Starting inserts of approx size: " + approxSize + "...");
+ jsTest.log("Starting inserts of approx size: " + approxSize + "...");
-var chunkSizeBytes = 1024 * 1024;
+ var chunkSizeBytes = 1024 * 1024;
-// We insert slightly more than the max number of docs per chunk, to test
-// if resetting the chunk size happens during reloads. If the size is
-// reset, we'd expect to split less, since the first split would then
-// disable further splits (statistically, since the decision is randomized).
-// We choose 1.4 since split attempts happen about once every 1/5 chunksize,
-// and we want to be sure we def get a split attempt at a full chunk.
-var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
-var totalInserts = insertsForSplit * numChunks;
+ // We insert slightly more than the max number of docs per chunk, to test
+ // if resetting the chunk size happens during reloads. If the size is
+ // reset, we'd expect to split less, since the first split would then
+ // disable further splits (statistically, since the decision is randomized).
+ // We choose 1.4 since split attempts happen about once every 1/5 chunksize,
+    // and we want to be sure we definitely get a split attempt at a full chunk.
+ var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
+ var totalInserts = insertsForSplit * numChunks;
-printjson({ chunkSizeBytes : chunkSizeBytes,
- insertsForSplit : insertsForSplit,
- totalInserts : totalInserts });
+ printjson({
+ chunkSizeBytes: chunkSizeBytes,
+ insertsForSplit: insertsForSplit,
+ totalInserts: totalInserts
+ });
-// Insert enough docs to trigger splits into all chunks
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < totalInserts; i++) {
- bulk.insert({ _id : i % numChunks + (i / totalInserts) });
-}
-assert.writeOK(bulk.execute());
+ // Insert enough docs to trigger splits into all chunks
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < totalInserts; i++) {
+ bulk.insert({_id: i % numChunks + (i / totalInserts)});
+ }
+ assert.writeOK(bulk.execute());
-jsTest.log("Inserts completed...");
+ jsTest.log("Inserts completed...");
-st.printShardingStatus(true);
-printjson(coll.stats());
+ st.printShardingStatus(true);
+ printjson(coll.stats());
-// Check that all chunks (except the two extreme chunks)
-// have been split at least once + 1 extra chunk as reload buffer
-assert.gte(config.chunks.count(), numChunks * 2 + 3);
+ // Check that all chunks (except the two extreme chunks)
+ // have been split at least once + 1 extra chunk as reload buffer
+ assert.gte(config.chunks.count(), numChunks * 2 + 3);
-jsTest.log("DONE!");
+ jsTest.log("DONE!");
-}
-else {
- jsTest.log( "Disabled test in debug builds." );
+} else {
+ jsTest.log("Disabled test in debug builds.");
}
st.stop();
-
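
For context on the insertsForSplit arithmetic in the hunk above (a rough sketch with an assumed document size, not part of the patch): with the 1 MB chunk size used by the test, inserting 1.4 chunks' worth of tiny {_id: <double>} documents works out to roughly eighty thousand inserts per chunk, comfortably past the point where a split attempt is expected:

    // Illustrative only; approxSize stands in for whatever Object.bsonsize({_id: 0.0}) reports.
    var chunkSizeBytes = 1024 * 1024;     // 1 MB chunk size
    var approxSize = 18;                  // assumed BSON size of a bare {_id: <double>} document
    var insertsForSplit = Math.ceil((chunkSizeBytes * 1.4) / approxSize);
    print(insertsForSplit);               // 81556 -- about 1.4 chunks' worth of documents
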
diff --git a/jstests/sharding/balance_repl.js b/jstests/sharding/balance_repl.js
index 85d029fce72..433e8167829 100644
--- a/jstests/sharding/balance_repl.js
+++ b/jstests/sharding/balance_repl.js
@@ -3,61 +3,59 @@
//
(function() {
-"use strict";
-
-// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
-// from stepping down during migrations on slow evergreen builders.
-var s = new ShardingTest({ shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
- } });
-
-var db = s.getDB( "test" );
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 2100; i++) {
- bulk.insert({ _id: i, x: i });
-}
-assert.writeOK(bulk.execute());
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'test-rs0');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-for ( i=0; i<20; i++ )
- s.adminCommand( { split : "test.foo" , middle : { _id : i * 100 } } );
-
-assert.eq( 2100, db.foo.find().itcount() );
-var coll = db.foo;
-coll.setSlaveOk();
-
-var dbPrimaryShardId = s.getPrimaryShardIdForDatabase( "test" );
-var other = s.config.shards.findOne( { _id : { $ne : dbPrimaryShardId } } );
-
-for ( i=0; i<20; i++ ) {
- // Needs to waitForDelete because we'll be performing a slaveOk query,
- // and secondaries don't have a chunk manager so it doesn't know how to
- // filter out docs it doesn't own.
- assert(s.adminCommand({ moveChunk: "test.foo",
- find: { _id: i * 100 },
- to : other._id,
- _secondaryThrottle: true,
- writeConcern: { w: 2 },
- _waitForDelete: true }));
- assert.eq( 2100, coll.find().itcount() );
-}
-
-s.stop();
+ "use strict";
+
+ // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+ // from stepping down during migrations on slow evergreen builders.
+ var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ },
+ rs1: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ }
+ }
+ });
+
+ var db = s.getDB("test");
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 2100; i++) {
+ bulk.insert({_id: i, x: i});
+ }
+ assert.writeOK(bulk.execute());
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+ for (i = 0; i < 20; i++)
+ s.adminCommand({split: "test.foo", middle: {_id: i * 100}});
+
+ assert.eq(2100, db.foo.find().itcount());
+ var coll = db.foo;
+ coll.setSlaveOk();
+
+ var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
+ var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
+
+ for (i = 0; i < 20; i++) {
+ // Needs to waitForDelete because we'll be performing a slaveOk query,
+ // and secondaries don't have a chunk manager so it doesn't know how to
+ // filter out docs it doesn't own.
+ assert(s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: i * 100},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+ assert.eq(2100, coll.find().itcount());
+ }
+
+ s.stop();
}());
diff --git a/jstests/sharding/balance_tags1.js b/jstests/sharding/balance_tags1.js
index 19d55bb0270..c1177ac4661 100644
--- a/jstests/sharding/balance_tags1.js
+++ b/jstests/sharding/balance_tags1.js
@@ -1,27 +1,24 @@
// Test balancing all chunks off of one shard
-var st = new ShardingTest({ name: "balance_tags1",
- shards: 3,
- mongos: 1,
- other: { chunkSize: 1,
- enableBalancer : true } });
+var st = new ShardingTest(
+ {name: "balance_tags1", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand({ enablesharding: "test" });
+st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
var db = st.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < 21; i++) {
- bulk.insert({ _id: i, x: i });
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-assert.commandWorked(st.s.adminCommand({ shardCollection: 'test.foo', key: { _id : 1 } }));
+assert.commandWorked(st.s.adminCommand({shardCollection: 'test.foo', key: {_id: 1}}));
st.stopBalancer();
for (i = 0; i < 20; i++) {
- st.adminCommand({ split : "test.foo", middle : { _id : i } });
+ st.adminCommand({split: "test.foo", middle: {_id: i}});
}
st.startBalancer();
@@ -30,39 +27,32 @@ st.printShardingStatus();
// Wait for the initial balance to happen
assert.soon(function() {
- var counts = st.chunkCounts("foo");
- printjson(counts);
- return counts["shard0000"] == 7 &&
- counts["shard0001"] == 7 &&
- counts["shard0002"] == 7;
- },
- "balance 1 didn't happen",
- 1000 * 60 * 10,
- 1000);
+ var counts = st.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0000"] == 7 && counts["shard0001"] == 7 && counts["shard0002"] == 7;
+}, "balance 1 didn't happen", 1000 * 60 * 10, 1000);
// Quick test of some shell helpers and setting up state
sh.addShardTag("shard0000", "a");
-assert.eq([ "a" ] , st.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq(["a"], st.config.shards.findOne({_id: "shard0000"}).tags);
sh.addShardTag("shard0000", "b");
-assert.eq([ "a" , "b" ], st.config.shards.findOne({ _id : "shard0000" }).tags);
+assert.eq(["a", "b"], st.config.shards.findOne({_id: "shard0000"}).tags);
sh.removeShardTag("shard0000", "b");
-assert.eq([ "a" ], st.config.shards.findOne( { _id : "shard0000" } ).tags);
+assert.eq(["a"], st.config.shards.findOne({_id: "shard0000"}).tags);
-sh.addShardTag("shard0001" , "a");
-sh.addTagRange("test.foo" , { _id : -1 } , { _id : 1000 } , "a");
+sh.addShardTag("shard0001", "a");
+sh.addTagRange("test.foo", {_id: -1}, {_id: 1000}, "a");
st.printShardingStatus();
// At this point, everything should drain off shard 2, which does not have the tag
assert.soon(function() {
- var counts = st.chunkCounts("foo");
- printjson(counts);
- return counts["shard0002"] == 0;
- },
- "balance 2 didn't happen",
- 1000 * 60 * 10 , 1000);
+ var counts = st.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0002"] == 0;
+}, "balance 2 didn't happen", 1000 * 60 * 10, 1000);
st.printShardingStatus();
diff --git a/jstests/sharding/balance_tags2.js b/jstests/sharding/balance_tags2.js
index 0bcedf97e33..8c54b2f3fc6 100644
--- a/jstests/sharding/balance_tags2.js
+++ b/jstests/sharding/balance_tags2.js
@@ -1,27 +1,24 @@
// Test balancing all chunks to one shard by tagging the full shard-key range on that collection
-var s = new ShardingTest({ name: "balance_tags2",
- shards: 3,
- mongos: 1,
- other: { chunkSize: 1,
- enableBalancer : true } });
+var s = new ShardingTest(
+ {name: "balance_tags2", shards: 3, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-s.adminCommand({ enablesharding: "test" });
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
var db = s.getDB("test");
var bulk = db.foo.initializeUnorderedBulkOp();
for (i = 0; i < 21; i++) {
- bulk.insert({ _id: i, x: i });
+ bulk.insert({_id: i, x: i});
}
assert.writeOK(bulk.execute());
-sh.shardCollection("test.foo", { _id : 1 });
+sh.shardCollection("test.foo", {_id: 1});
sh.stopBalancer();
for (i = 0; i < 20; i++) {
- sh.splitAt("test.foo", {_id : i});
+ sh.splitAt("test.foo", {_id: i});
}
sh.startBalancer();
@@ -30,35 +27,26 @@ s.printShardingStatus(true);
// Wait for the initial balance to happen
assert.soon(function() {
- var counts = s.chunkCounts("foo");
- printjson(counts);
- return counts["shard0000"] == 7 &&
- counts["shard0001"] == 7 &&
- counts["shard0002"] == 7;
- },
- "balance 1 didn't happen",
- 1000 * 60 * 10,
- 1000);
+ var counts = s.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0000"] == 7 && counts["shard0001"] == 7 && counts["shard0002"] == 7;
+}, "balance 1 didn't happen", 1000 * 60 * 10, 1000);
// Tag one shard
-sh.addShardTag("shard0000" , "a");
-assert.eq([ "a" ] , s.config.shards.findOne({ _id : "shard0000" }).tags);
+sh.addShardTag("shard0000", "a");
+assert.eq(["a"], s.config.shards.findOne({_id: "shard0000"}).tags);
// Tag the whole collection (ns) to one shard
-sh.addTagRange("test.foo", { _id : MinKey }, { _id : MaxKey }, "a");
+sh.addTagRange("test.foo", {_id: MinKey}, {_id: MaxKey}, "a");
// Wait for things to move to that one shard
s.printShardingStatus(true);
assert.soon(function() {
- var counts = s.chunkCounts("foo");
- printjson(counts);
- return counts["shard0001"] == 0 &&
- counts["shard0002"] == 0;
- },
- "balance 2 didn't happen",
- 1000 * 60 * 10,
- 1000);
+ var counts = s.chunkCounts("foo");
+ printjson(counts);
+ return counts["shard0001"] == 0 && counts["shard0002"] == 0;
+}, "balance 2 didn't happen", 1000 * 60 * 10, 1000);
s.printShardingStatus(true);
diff --git a/jstests/sharding/basic_drop_coll.js b/jstests/sharding/basic_drop_coll.js
index af9008cfcac..568d2da9443 100644
--- a/jstests/sharding/basic_drop_coll.js
+++ b/jstests/sharding/basic_drop_coll.js
@@ -3,52 +3,51 @@
 * cleaned up properly.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-// Test dropping an unsharded collection.
+ // Test dropping an unsharded collection.
-assert.writeOK(testDB.bar.insert({ x: 1 }));
-assert.neq(null, testDB.bar.findOne({ x: 1 }));
+ assert.writeOK(testDB.bar.insert({x: 1}));
+ assert.neq(null, testDB.bar.findOne({x: 1}));
-assert.commandWorked(testDB.runCommand({ drop: 'bar' }));
-assert.eq(null, testDB.bar.findOne({ x: 1 }));
+ assert.commandWorked(testDB.runCommand({drop: 'bar'}));
+ assert.eq(null, testDB.bar.findOne({x: 1}));
-// Test dropping a sharded collection.
+ // Test dropping a sharded collection.
-assert.commandWorked(st.s.adminCommand({ enableSharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0000');
-st.s.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
-st.s.adminCommand({ split: 'test.user', middle: { _id: 0 }});
-assert.commandWorked(st.s.adminCommand({ moveChunk: 'test.user',
- find: { _id: 0 },
- to: 'shard0001' }));
+ assert.commandWorked(st.s.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+ st.s.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+ st.s.adminCommand({split: 'test.user', middle: {_id: 0}});
+ assert.commandWorked(
+ st.s.adminCommand({moveChunk: 'test.user', find: {_id: 0}, to: 'shard0001'}));
-assert.writeOK(testDB.user.insert({ _id: 10 }));
-assert.writeOK(testDB.user.insert({ _id: -10 }));
+ assert.writeOK(testDB.user.insert({_id: 10}));
+ assert.writeOK(testDB.user.insert({_id: -10}));
-assert.neq(null, st.d0.getDB('test').user.findOne({ _id: -10 }));
-assert.neq(null, st.d1.getDB('test').user.findOne({ _id: 10 }));
+ assert.neq(null, st.d0.getDB('test').user.findOne({_id: -10}));
+ assert.neq(null, st.d1.getDB('test').user.findOne({_id: 10}));
-var configDB = st.s.getDB('config');
-var collDoc = configDB.collections.findOne({ _id: 'test.user' });
-assert(!collDoc.dropped);
+ var configDB = st.s.getDB('config');
+ var collDoc = configDB.collections.findOne({_id: 'test.user'});
+ assert(!collDoc.dropped);
-assert.eq(2, configDB.chunks.count({ ns: 'test.user' }));
+ assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
-assert.commandWorked(testDB.runCommand({ drop: 'user' }));
+ assert.commandWorked(testDB.runCommand({drop: 'user'}));
-assert.eq(null, st.d0.getDB('test').user.findOne());
-assert.eq(null, st.d1.getDB('test').user.findOne());
+ assert.eq(null, st.d0.getDB('test').user.findOne());
+ assert.eq(null, st.d1.getDB('test').user.findOne());
-collDoc = configDB.collections.findOne({ _id: 'test.user' });
-assert(collDoc.dropped);
+ collDoc = configDB.collections.findOne({_id: 'test.user'});
+ assert(collDoc.dropped);
-assert.eq(0, configDB.chunks.count({ ns: 'test.user' }));
+ assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js
index 7b23666e702..f21c4d17784 100644
--- a/jstests/sharding/basic_sharding_params.js
+++ b/jstests/sharding/basic_sharding_params.js
@@ -1,31 +1,27 @@
// Test of complex sharding initialization
function shardingTestUsingObjects() {
- var st = new ShardingTest( {
-
- mongos : { s0 : { verbose : 6 }, s1 : { verbose : 5 } },
- config : { c0 : { verbose : 4 } },
- shards : { d0 : { verbose : 3 },
- rs1 : {
- nodes : { d0 : { verbose : 2 },
- a1 : { verbose : 1 } } }
- }
- } );
+ var st = new ShardingTest({
+
+ mongos: {s0: {verbose: 6}, s1: {verbose: 5}},
+ config: {c0: {verbose: 4}},
+ shards: {d0: {verbose: 3}, rs1: {nodes: {d0: {verbose: 2}, a1: {verbose: 1}}}}
+ });
var s0 = st.s0;
- assert.eq( s0, st._mongos[0] );
+ assert.eq(s0, st._mongos[0]);
var s1 = st.s1;
- assert.eq( s1, st._mongos[1] );
+ assert.eq(s1, st._mongos[1]);
var c0 = st.c0;
- assert.eq( c0, st._configServers[0] );
+ assert.eq(c0, st._configServers[0]);
var d0 = st.d0;
- assert.eq( d0, st._connections[0] );
+ assert.eq(d0, st._connections[0]);
var rs1 = st.rs1;
- assert.eq( rs1, st._rsObjects[1] );
+ assert.eq(rs1, st._rsObjects[1]);
var rs1_d0 = rs1.nodes[0];
var rs1_a1 = rs1.nodes[1];
@@ -41,26 +37,26 @@ function shardingTestUsingObjects() {
}
function shardingTestUsingArrays() {
- var st = new ShardingTest( {
- mongos : [{ verbose : 5 }, { verbose : 4 } ],
- config : [{ verbose : 3 }],
- shards : [{ verbose : 2 }, { verbose : 1 } ]
+ var st = new ShardingTest({
+ mongos: [{verbose: 5}, {verbose: 4}],
+ config: [{verbose: 3}],
+ shards: [{verbose: 2}, {verbose: 1}]
});
var s0 = st.s0;
- assert.eq( s0, st._mongos[0] );
+ assert.eq(s0, st._mongos[0]);
var s1 = st.s1;
- assert.eq( s1, st._mongos[1] );
+ assert.eq(s1, st._mongos[1]);
var c0 = st.c0;
- assert.eq( c0, st._configServers[0] );
+ assert.eq(c0, st._configServers[0]);
var d0 = st.d0;
- assert.eq( d0, st._connections[0] );
+ assert.eq(d0, st._connections[0]);
var d1 = st.d1;
- assert.eq( d1, st._connections[1] );
+ assert.eq(d1, st._connections[1]);
assert(s0.commandLine.hasOwnProperty("vvvvv"));
assert(s1.commandLine.hasOwnProperty("vvvv"));
@@ -73,4 +69,3 @@ function shardingTestUsingArrays() {
shardingTestUsingObjects();
shardingTestUsingArrays();
-
diff --git a/jstests/sharding/basic_split.js b/jstests/sharding/basic_split.js
index 0c5f7e0e416..8ddff04007c 100644
--- a/jstests/sharding/basic_split.js
+++ b/jstests/sharding/basic_split.js
@@ -2,91 +2,92 @@
* Perform basic tests for the split command against mongos.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }});
-var configDB = st.s.getDB('config');
+ var st = new ShardingTest({shards: 1, other: {chunkSize: 1}});
+ var configDB = st.s.getDB('config');
-// split on invalid ns.
-assert.commandFailed(configDB.adminCommand({ split: 'user', key: { _id: 1 }}));
+ // split on invalid ns.
+ assert.commandFailed(configDB.adminCommand({split: 'user', key: {_id: 1}}));
-// split on unsharded collection (db is not sharding enabled).
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', key: { _id: 1 }}));
+ // split on unsharded collection (db is not sharding enabled).
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
-configDB.adminCommand({ enableSharding: 'test' });
+ configDB.adminCommand({enableSharding: 'test'});
-// split on unsharded collection (db is sharding enabled).
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', key: { _id: 1 }}));
+ // split on unsharded collection (db is sharding enabled).
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', key: {_id: 1}}));
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}}));
-assert.eq(null, configDB.chunks.findOne({ ns: 'test.user', min: { _id: 0 }}));
+ assert.eq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
-assert.commandWorked(configDB.adminCommand({ split: 'test.user', middle: { _id: 0 }}));
-assert.neq(null, configDB.chunks.findOne({ ns: 'test.user', min: { _id: 0 }}));
+ assert.commandWorked(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
+ assert.neq(null, configDB.chunks.findOne({ns: 'test.user', min: {_id: 0}}));
-// Cannot split on existing chunk boundary.
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', middle: { _id: 0 }}));
+ // Cannot split on existing chunk boundary.
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {_id: 0}}));
-// Attempt to split on a value that is not the shard key.
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', middle: { x: 100 }}));
-assert.commandFailed(configDB.adminCommand({ split: 'test.user', find: { x: 100 }}));
-assert.commandFailed(configDB.adminCommand({ split: 'test.user',
- bounds: [{ x: MinKey }, { x: MaxKey }]}));
+ // Attempt to split on a value that is not the shard key.
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', middle: {x: 100}}));
+ assert.commandFailed(configDB.adminCommand({split: 'test.user', find: {x: 100}}));
+ assert.commandFailed(
+ configDB.adminCommand({split: 'test.user', bounds: [{x: MinKey}, {x: MaxKey}]}));
-// Insert documents large enough to fill up a chunk, but do it directly in the shard in order
-// to bypass the auto-split logic.
-var kiloDoc = new Array(1024).join('x');
-var testDB = st.d0.getDB('test');
-var bulk = testDB.user.initializeUnorderedBulkOp();
-for (var x = -1200; x < 1200; x++) {
- bulk.insert({ _id: x, val: kiloDoc });
-}
-assert.writeOK(bulk.execute());
+ // Insert documents large enough to fill up a chunk, but do it directly in the shard in order
+ // to bypass the auto-split logic.
+ var kiloDoc = new Array(1024).join('x');
+ var testDB = st.d0.getDB('test');
+ var bulk = testDB.user.initializeUnorderedBulkOp();
+ for (var x = -1200; x < 1200; x++) {
+ bulk.insert({_id: x, val: kiloDoc});
+ }
+ assert.writeOK(bulk.execute());
-assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount());
+ assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
-// Errors if bounds do not correspond to existing chunk boundaries.
-assert.commandFailed(configDB.adminCommand({ split: 'test.user',
- bounds: [{ _id: 0 }, { _id: 1000 }]}));
-assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount());
+ // Errors if bounds do not correspond to existing chunk boundaries.
+ assert.commandFailed(
+ configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: 1000}]}));
+ assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.user',
- bounds: [{ _id: 0 }, { _id: MaxKey }]}));
-assert.gt(configDB.chunks.find({ ns: 'test.user', min: { $gte: { _id: 0 }}}).itcount(), 1);
+ assert.commandWorked(
+ configDB.adminCommand({split: 'test.user', bounds: [{_id: 0}, {_id: MaxKey}]}));
+ assert.gt(configDB.chunks.find({ns: 'test.user', min: {$gte: {_id: 0}}}).itcount(), 1);
-assert.eq(1, configDB.chunks.find({ ns: 'test.user', min: { $lt: { _id: 0 }}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.user', find: { _id: -1 }}));
-assert.gt(configDB.chunks.find({ ns: 'test.user', min: { $lt: { _id: 0 }}}).itcount(), 1);
+ assert.eq(1, configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount());
+ assert.commandWorked(configDB.adminCommand({split: 'test.user', find: {_id: -1}}));
+ assert.gt(configDB.chunks.find({ns: 'test.user', min: {$lt: {_id: 0}}}).itcount(), 1);
-//
-// Compound Key
-//
+ //
+ // Compound Key
+ //
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.compound', key: { x: 1, y: 1 }}));
+ assert.commandWorked(
+ configDB.adminCommand({shardCollection: 'test.compound', key: {x: 1, y: 1}}));
-assert.eq(null, configDB.chunks.findOne({ ns: 'test.compound', min: { x: 0, y: 0 }}));
-assert.commandWorked(configDB.adminCommand({ split: 'test.compound', middle: { x: 0, y: 0 }}));
-assert.neq(null, configDB.chunks.findOne({ ns: 'test.compound', min: { x: 0, y: 0 }}));
+ assert.eq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
+ assert.commandWorked(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
+ assert.neq(null, configDB.chunks.findOne({ns: 'test.compound', min: {x: 0, y: 0}}));
-// cannot split on existing chunk boundary.
-assert.commandFailed(configDB.adminCommand({ split: 'test.compound', middle: { x: 0, y: 0 }}));
+ // cannot split on existing chunk boundary.
+ assert.commandFailed(configDB.adminCommand({split: 'test.compound', middle: {x: 0, y: 0}}));
-bulk = testDB.compound.initializeUnorderedBulkOp();
-for (x = -1200; x < 1200; x++) {
- bulk.insert({ x: x, y: x, val: kiloDoc });
-}
-assert.writeOK(bulk.execute());
+ bulk = testDB.compound.initializeUnorderedBulkOp();
+ for (x = -1200; x < 1200; x++) {
+ bulk.insert({x: x, y: x, val: kiloDoc});
+ }
+ assert.writeOK(bulk.execute());
-assert.eq(1, configDB.chunks.find({ ns: 'test.compound', min: { $gte: { x: 0, y: 0 }}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.compound',
- bounds: [{ x: 0, y: 0 }, { x: MaxKey, y: MaxKey }]}));
-assert.gt(configDB.chunks.find({ ns: 'test.compound', min: { $gte: { x: 0, y: 0 }}}).itcount(), 1);
+ assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount());
+ assert.commandWorked(configDB.adminCommand(
+ {split: 'test.compound', bounds: [{x: 0, y: 0}, {x: MaxKey, y: MaxKey}]}));
+ assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$gte: {x: 0, y: 0}}}).itcount(), 1);
-assert.eq(1, configDB.chunks.find({ ns: 'test.compound', min: { $lt: { x: 0, y: 0 }}}).itcount());
-assert.commandWorked(configDB.adminCommand({ split: 'test.compound', find: { x: -1, y: -1 }}));
-assert.gt(configDB.chunks.find({ ns: 'test.compound', min: { $lt: { x: 0, y: 0 }}}).itcount(), 1);
+ assert.eq(1, configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount());
+ assert.commandWorked(configDB.adminCommand({split: 'test.compound', find: {x: -1, y: -1}}));
+ assert.gt(configDB.chunks.find({ns: 'test.compound', min: {$lt: {x: 0, y: 0}}}).itcount(), 1);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/batch_write_command_sharded.js b/jstests/sharding/batch_write_command_sharded.js
index 2b88228477b..c82035af8c6 100644
--- a/jstests/sharding/batch_write_command_sharded.js
+++ b/jstests/sharding/batch_write_command_sharded.js
@@ -4,247 +4,239 @@
// *only* mongos-specific tests.
//
(function() {
-"use strict";
-
-// Only reason for using localhost name is to make the test consistent with naming host so it
-// will be easier to check for the host name inside error objects.
-var options = {useHostname: false};
-var st = new ShardingTest({shards: 2, mongos: 1, config: 3, other: options});
-st.stopBalancer();
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var shards = config.shards.find().toArray();
-var configConnStr = st._configDB;
-
-jsTest.log("Starting sharding batch write tests...");
-
-var request;
-var result;
-
-// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
-
-//
-//
-// Mongos _id autogeneration tests for sharded collections
-
-var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().toString() }));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-assert.commandWorked(admin.runCommand({ shardCollection : coll.toString(),
- key : { _id : 1 } }));
-
-//
-// Basic insert no _id
-coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{ a : 1 }] } );
-printjson( result = coll.runCommand(request) );
-assert(result.ok);
-assert.eq(1, result.n);
-assert.eq(1, coll.count());
-
-//
-// Multi insert some _ids
-coll.remove({});
-printjson( request = {insert : coll.getName(),
- documents: [{ _id : 0, a : 1 }, { a : 2 }] } );
-printjson( result = coll.runCommand(request) );
-assert(result.ok);
-assert.eq(2, result.n);
-assert.eq(2, coll.count());
-assert.eq(1, coll.count({ _id : 0 }));
-
-//
-// Ensure generating many _ids don't push us over limits
-var maxDocSize = (16 * 1024 * 1024) / 1000;
-var baseDocSize = Object.bsonsize({ a : 1, data : "" });
-var dataSize = maxDocSize - baseDocSize;
-
-var data = "";
-for (var i = 0; i < dataSize; i++)
- data += "x";
-
-var documents = [];
-for (var i = 0; i < 1000; i++) documents.push({ a : i, data : data });
-
-assert.commandWorked(coll.getMongo().getDB("admin").runCommand({ setParameter : 1, logLevel : 4 }));
-coll.remove({});
-request = { insert : coll.getName(),
- documents: documents };
-printjson( result = coll.runCommand(request) );
-assert(result.ok);
-assert.eq(1000, result.n);
-assert.eq(1000, coll.count());
-
-//
-//
-// Config server upserts (against admin db, for example) require _id test
-var adminColl = admin.getCollection(coll.getName());
-
-//
-// Without _id
-adminColl.remove({});
-printjson( request = {update : adminColl.getName(),
- updates : [{ q : { a : 1 }, u : { a : 1 }, upsert : true }]});
-var result = adminColl.runCommand(request);
-assert.commandWorked(result);
-assert.eq(1, result.n);
-assert.eq(1, adminColl.count());
-
-//
-// With _id
-adminColl.remove({});
-printjson( request = {update : adminColl.getName(),
- updates : [{ q : { _id : 1, a : 1 }, u : { a : 1 }, upsert : true }]});
-assert.commandWorked(adminColl.runCommand(request));
-assert.eq(1, result.n);
-assert.eq(1, adminColl.count());
-
-//
-//
-// Stale config progress tests
-// Set up a new collection across two shards, then revert the chunks to an earlier state to put
-// mongos and mongod permanently out of sync.
-
-// START SETUP
-var brokenColl = mongos.getCollection( "broken.coll" );
-assert.commandWorked(admin.runCommand({ enableSharding : brokenColl.getDB().toString() }));
-printjson(admin.runCommand({ movePrimary : brokenColl.getDB().toString(), to : shards[0]._id }));
-assert.commandWorked(admin.runCommand({ shardCollection : brokenColl.toString(),
- key : { _id : 1 } }));
-assert.commandWorked(admin.runCommand({ split : brokenColl.toString(),
- middle : { _id : 0 } }));
-
-var oldChunks = config.chunks.find().toArray();
-
-// Start a new mongos and bring it up-to-date with the chunks so far
-
-var staleMongos = MongoRunner.runMongos({ configdb : configConnStr });
-brokenColl = staleMongos.getCollection(brokenColl.toString());
-assert.writeOK(brokenColl.insert({ hello : "world" }));
-
-// Modify the chunks to make shards at a higher version
-
-assert.commandWorked(admin.runCommand({ moveChunk : brokenColl.toString(),
- find : { _id : 0 },
- to : shards[1]._id }));
-
-// Rewrite the old chunks back to the config server
-
-assert.writeOK(config.chunks.remove({}));
-for ( var i = 0; i < oldChunks.length; i++ )
- assert.writeOK(config.chunks.insert(oldChunks[i]));
-
-// Stale mongos can no longer bring itself up-to-date!
-// END SETUP
-
-//
-// Config server insert, repeatedly stale
-printjson( request = {insert : brokenColl.getName(),
- documents: [{_id:-1}]} );
-printjson( result = brokenColl.runCommand(request) );
-assert(result.ok);
-assert.eq(0, result.n);
-assert.eq(1, result.writeErrors.length);
-assert.eq(0, result.writeErrors[0].index);
-assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
-//
-// Config server insert to other shard, repeatedly stale
-printjson( request = {insert : brokenColl.getName(),
- documents: [{_id:1}]} );
-printjson( result = brokenColl.runCommand(request) );
-assert(result.ok);
-assert.eq(0, result.n);
-assert.eq(1, result.writeErrors.length);
-assert.eq(0, result.writeErrors[0].index);
-assert.eq(result.writeErrors[0].code, 82); // No Progress Made
-
-//
-//
-// Tests against config server
-var configColl = config.getCollection( "batch_write_protocol_sharded" );
-
-//
-// Basic config server insert
-configColl.remove({});
-printjson( request = {insert : configColl.getName(),
- documents: [{a:1}]} );
-var result = configColl.runCommand(request);
-assert.commandWorked(result);
-assert.eq(1, result.n);
-
-st.configRS.awaitReplication();
-assert.eq(1, st.config0.getCollection(configColl + "").count());
-assert.eq(1, st.config1.getCollection(configColl + "").count());
-assert.eq(1, st.config2.getCollection(configColl + "").count());
-
-//
-// Basic config server update
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {update : configColl.getName(),
- updates: [{q: {a:1}, u: {$set: {b:2}}}]} );
-printjson( result = configColl.runCommand(request) );
-assert(result.ok);
-assert.eq(1, result.n);
-
-st.configRS.awaitReplication();
-assert.eq(1, st.config0.getCollection(configColl + "").count({b:2}));
-assert.eq(1, st.config1.getCollection(configColl + "").count({b:2}));
-assert.eq(1, st.config2.getCollection(configColl + "").count({b:2}));
-
-//
-// Basic config server delete
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {'delete' : configColl.getName(),
- deletes: [{q: {a:1}, limit: 0}]} );
-printjson( result = configColl.runCommand(request) );
-assert(result.ok);
-assert.eq(1, result.n);
-
-st.configRS.awaitReplication();
-assert.eq(0, st.config0.getCollection(configColl + "").count());
-assert.eq(0, st.config1.getCollection(configColl + "").count());
-assert.eq(0, st.config2.getCollection(configColl + "").count());
-
-MongoRunner.stopMongod(st.config1);
-MongoRunner.stopMongod(st.config2);
-st.configRS.awaitNoPrimary();
-
-// Config server insert with no config PRIMARY
-configColl.remove({});
-printjson( request = {insert : configColl.getName(),
- documents: [{a:1}]} );
-printjson( result = configColl.runCommand(request) );
-assert(!result.ok);
-assert(result.errmsg != null);
-
-
-// Config server insert with no config PRIMARY
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {update : configColl.getName(),
- updates: [{q: {a:1}, u: {$set: {b:2}}}]} );
-printjson( result = configColl.runCommand(request) );
-assert(!result.ok);
-assert(result.errmsg != null);
-
-// Config server insert with no config PRIMARY
-configColl.remove({});
-configColl.insert({a:1});
-printjson( request = {delete : configColl.getName(),
- deletes: [{q: {a:1}, limit: 0}]} );
-printjson( result = configColl.runCommand(request) );
-assert(!result.ok);
-assert(result.errmsg != null);
-
-jsTest.log("DONE!");
-
-MongoRunner.stopMongos( staleMongos );
-st.stop();
+ "use strict";
+
+ // The only reason for using the localhost name is to give the test a consistent host name,
+ // which makes it easier to check for the host name inside error objects.
+ var options = {
+ useHostname: false
+ };
+ var st = new ShardingTest({shards: 2, mongos: 1, config: 3, other: options});
+ st.stopBalancer();
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var shards = config.shards.find().toArray();
+ var configConnStr = st._configDB;
+
+ jsTest.log("Starting sharding batch write tests...");
+
+ var request;
+ var result;
+
+ // NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING
+
+ //
+ //
+ // Mongos _id autogeneration tests for sharded collections
+
+ var coll = mongos.getCollection("foo.bar");
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ assert.commandWorked(admin.runCommand({shardCollection: coll.toString(), key: {_id: 1}}));
+
+ //
+ // Basic insert no _id
+ coll.remove({});
+ printjson(request = {insert: coll.getName(), documents: [{a: 1}]});
+ printjson(result = coll.runCommand(request));
+ assert(result.ok);
+ assert.eq(1, result.n);
+ assert.eq(1, coll.count());
+
+ //
+ // Multi insert some _ids
+ coll.remove({});
+ printjson(request = {insert: coll.getName(), documents: [{_id: 0, a: 1}, {a: 2}]});
+ printjson(result = coll.runCommand(request));
+ assert(result.ok);
+ assert.eq(2, result.n);
+ assert.eq(2, coll.count());
+ assert.eq(1, coll.count({_id: 0}));
+
+ //
+ // Ensure that generating many _ids doesn't push us over limits
+ var maxDocSize = (16 * 1024 * 1024) / 1000;
+ var baseDocSize = Object.bsonsize({a: 1, data: ""});
+ var dataSize = maxDocSize - baseDocSize;
+
+ var data = "";
+ for (var i = 0; i < dataSize; i++)
+ data += "x";
+
+ var documents = [];
+ for (var i = 0; i < 1000; i++)
+ documents.push({a: i, data: data});
+
+ assert.commandWorked(coll.getMongo().getDB("admin").runCommand({setParameter: 1, logLevel: 4}));
+ coll.remove({});
+ request = {
+ insert: coll.getName(),
+ documents: documents
+ };
+ printjson(result = coll.runCommand(request));
+ assert(result.ok);
+ assert.eq(1000, result.n);
+ assert.eq(1000, coll.count());
+
+ //
+ //
+ // Test that config server upserts (against the admin db, for example) require an _id
+ var adminColl = admin.getCollection(coll.getName());
+
+ //
+ // Without _id
+ adminColl.remove({});
+ printjson(
+ request = {update: adminColl.getName(), updates: [{q: {a: 1}, u: {a: 1}, upsert: true}]});
+ var result = adminColl.runCommand(request);
+ assert.commandWorked(result);
+ assert.eq(1, result.n);
+ assert.eq(1, adminColl.count());
+
+ //
+ // With _id
+ adminColl.remove({});
+ printjson(request = {
+ update: adminColl.getName(),
+ updates: [{q: {_id: 1, a: 1}, u: {a: 1}, upsert: true}]
+ });
+ assert.commandWorked(result = adminColl.runCommand(request));
+ assert.eq(1, result.n);
+ assert.eq(1, adminColl.count());
+
+ //
+ //
+ // Stale config progress tests
+ // Set up a new collection across two shards, then revert the chunks to an earlier state to put
+ // mongos and mongod permanently out of sync.
+
+ // START SETUP
+ var brokenColl = mongos.getCollection("broken.coll");
+ assert.commandWorked(admin.runCommand({enableSharding: brokenColl.getDB().toString()}));
+ printjson(admin.runCommand({movePrimary: brokenColl.getDB().toString(), to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: brokenColl.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: brokenColl.toString(), middle: {_id: 0}}));
+
+ var oldChunks = config.chunks.find().toArray();
+
+ // Start a new mongos and bring it up-to-date with the chunks so far
+
+ var staleMongos = MongoRunner.runMongos({configdb: configConnStr});
+ brokenColl = staleMongos.getCollection(brokenColl.toString());
+ assert.writeOK(brokenColl.insert({hello: "world"}));
+
+ // Modify the chunks to make shards at a higher version
+
+ assert.commandWorked(
+ admin.runCommand({moveChunk: brokenColl.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+ // Rewrite the old chunks back to the config server
+
+ assert.writeOK(config.chunks.remove({}));
+ for (var i = 0; i < oldChunks.length; i++)
+ assert.writeOK(config.chunks.insert(oldChunks[i]));
+
+ // Stale mongos can no longer bring itself up-to-date!
+ // END SETUP
+
+ //
+ // Config server insert, repeatedly stale
+ printjson(request = {insert: brokenColl.getName(), documents: [{_id: -1}]});
+ printjson(result = brokenColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(0, result.n);
+ assert.eq(1, result.writeErrors.length);
+ assert.eq(0, result.writeErrors[0].index);
+ assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+ //
+ // Config server insert to other shard, repeatedly stale
+ printjson(request = {insert: brokenColl.getName(), documents: [{_id: 1}]});
+ printjson(result = brokenColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(0, result.n);
+ assert.eq(1, result.writeErrors.length);
+ assert.eq(0, result.writeErrors[0].index);
+ assert.eq(result.writeErrors[0].code, 82); // No Progress Made
+
+ //
+ //
+ // Tests against config server
+ var configColl = config.getCollection("batch_write_protocol_sharded");
+
+ //
+ // Basic config server insert
+ configColl.remove({});
+ printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
+ var result = configColl.runCommand(request);
+ assert.commandWorked(result);
+ assert.eq(1, result.n);
+
+ st.configRS.awaitReplication();
+ assert.eq(1, st.config0.getCollection(configColl + "").count());
+ assert.eq(1, st.config1.getCollection(configColl + "").count());
+ assert.eq(1, st.config2.getCollection(configColl + "").count());
+
+ //
+ // Basic config server update
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
+ printjson(result = configColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(1, result.n);
+
+ st.configRS.awaitReplication();
+ assert.eq(1, st.config0.getCollection(configColl + "").count({b: 2}));
+ assert.eq(1, st.config1.getCollection(configColl + "").count({b: 2}));
+ assert.eq(1, st.config2.getCollection(configColl + "").count({b: 2}));
+
+ //
+ // Basic config server delete
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {'delete': configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
+ printjson(result = configColl.runCommand(request));
+ assert(result.ok);
+ assert.eq(1, result.n);
+
+ st.configRS.awaitReplication();
+ assert.eq(0, st.config0.getCollection(configColl + "").count());
+ assert.eq(0, st.config1.getCollection(configColl + "").count());
+ assert.eq(0, st.config2.getCollection(configColl + "").count());
+
+ MongoRunner.stopMongod(st.config1);
+ MongoRunner.stopMongod(st.config2);
+ st.configRS.awaitNoPrimary();
+
+ // Config server insert with no config PRIMARY
+ configColl.remove({});
+ printjson(request = {insert: configColl.getName(), documents: [{a: 1}]});
+ printjson(result = configColl.runCommand(request));
+ assert(!result.ok);
+ assert(result.errmsg != null);
+
+ // Config server update with no config PRIMARY
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {update: configColl.getName(), updates: [{q: {a: 1}, u: {$set: {b: 2}}}]});
+ printjson(result = configColl.runCommand(request));
+ assert(!result.ok);
+ assert(result.errmsg != null);
+
+ // Config server delete with no config PRIMARY
+ configColl.remove({});
+ configColl.insert({a: 1});
+ printjson(request = {delete: configColl.getName(), deletes: [{q: {a: 1}, limit: 0}]});
+ printjson(result = configColl.runCommand(request));
+ assert(!result.ok);
+ assert(result.errmsg != null);
+
+ jsTest.log("DONE!");
+
+ MongoRunner.stopMongos(staleMongos);
+ st.stop();
}());
\ No newline at end of file
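The _id autogeneration size check in the hunk above rests on a small piece of arithmetic: each of the 1000 documents is padded up to roughly 16 MB / 1000 bytes, so the batch only fits if the ObjectId that mongos adds per document stays within the slack left by the base fields. A minimal mongo shell sketch of that arithmetic, separate from the test itself (the printed numbers are illustrative):

    // Sketch: estimate the per-document overhead of an autogenerated ObjectId _id,
    // using the same Object.bsonsize() helper the test uses.
    var perDocBudget = (16 * 1024 * 1024) / 1000;  // the test's per-document target
    var withoutId = Object.bsonsize({a: 1, data: ""});
    var withId = Object.bsonsize({_id: ObjectId(), a: 1, data: ""});
    print("per-document budget: " + perDocBudget + " bytes");
    print("autogenerated _id adds about " + (withId - withoutId) + " bytes per document");
    // The test pads each document with (perDocBudget - withoutId) 'x' characters, so the
    // full 1000-document batch must still be accepted after mongos fills in the _ids.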
diff --git a/jstests/sharding/bouncing_count.js b/jstests/sharding/bouncing_count.js
index d2df8c92984..62ff26c08a7 100644
--- a/jstests/sharding/bouncing_count.js
+++ b/jstests/sharding/bouncing_count.js
@@ -1,58 +1,57 @@
// Tests whether new sharding is detected on insert by mongos
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 10, mongos: 3 });
+ var st = new ShardingTest({shards: 10, mongos: 3});
-var mongosA = st.s0;
-var mongosB = st.s1;
-var mongosC = st.s2;
+ var mongosA = st.s0;
+ var mongosB = st.s1;
+ var mongosC = st.s2;
-var admin = mongosA.getDB("admin");
-var config = mongosA.getDB("config");
+ var admin = mongosA.getDB("admin");
+ var config = mongosA.getDB("config");
-var collA = mongosA.getCollection("foo.bar");
-var collB = mongosB.getCollection("" + collA);
-var collC = mongosB.getCollection("" + collA);
+ var collA = mongosA.getCollection("foo.bar");
+ var collB = mongosB.getCollection("" + collA);
+ var collC = mongosC.getCollection("" + collA);
-var shards = config.shards.find().sort({ _id: 1 }).toArray();
+ var shards = config.shards.find().sort({_id: 1}).toArray();
-assert.commandWorked(admin.runCommand({ enableSharding: "" + collA.getDB() }));
-st.ensurePrimaryShard(collA.getDB().getName(), shards[1]._id);
-assert.commandWorked(admin.runCommand({ shardCollection: "" + collA, key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: "" + collA.getDB()}));
+ st.ensurePrimaryShard(collA.getDB().getName(), shards[1]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: "" + collA, key: {_id: 1}}));
-jsTestLog("Splitting up the collection...");
+ jsTestLog("Splitting up the collection...");
-// Split up the collection
-for(var i = 0; i < shards.length; i++){
- assert.commandWorked(admin.runCommand({ split: "" + collA, middle: { _id: i } }));
- assert.commandWorked(
- admin.runCommand({ moveChunk: "" + collA, find: { _id: i }, to: shards[i]._id }));
-}
+ // Split up the collection
+ for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand({split: "" + collA, middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: "" + collA, find: {_id: i}, to: shards[i]._id}));
+ }
-mongosB.getDB("admin").runCommand({ flushRouterConfig: 1 });
-mongosC.getDB("admin").runCommand({ flushRouterConfig: 1 });
+ mongosB.getDB("admin").runCommand({flushRouterConfig: 1});
+ mongosC.getDB("admin").runCommand({flushRouterConfig: 1});
-printjson(collB.count());
-printjson(collC.count());
+ printjson(collB.count());
+ printjson(collC.count());
-// Change up all the versions...
-for(var i = 0; i < shards.length; i++){
- assert.commandWorked(admin.runCommand({ moveChunk: "" + collA,
- find: { _id: i },
- to: shards[ (i + 1) % shards.length ]._id }));
-}
+ // Change up all the versions...
+ for (var i = 0; i < shards.length; i++) {
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: "" + collA, find: {_id: i}, to: shards[(i + 1) % shards.length]._id}));
+ }
-// Make sure mongos A is up-to-date
-mongosA.getDB("admin").runCommand({ flushRouterConfig: 1 });
+ // Make sure mongos A is up-to-date
+ mongosA.getDB("admin").runCommand({flushRouterConfig: 1});
-st.printShardingStatus(true);
+ st.printShardingStatus(true);
-jsTestLog("Running count!");
+ jsTestLog("Running count!");
-printjson(collB.count());
-printjson(collC.find().toArray());
+ printjson(collB.count());
+ printjson(collC.find().toArray());
-st.stop();
+ st.stop();
})();
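The counts above only exercise stale routing because mongosB and mongosC first drop their cached routing tables. A hedged sketch of that flushRouterConfig pattern on its own (the host string is a placeholder, not part of the test):

    // Sketch: clear a mongos router's cached chunk metadata so its next operation
    // re-reads routing information from the config servers.
    var routerConn = new Mongo("localhost:27017");  // placeholder mongos address
    assert.commandWorked(routerConn.getDB("admin").runCommand({flushRouterConfig: 1}));
    // The next count() against a sharded collection now routes with fresh chunk versions.
    print(routerConn.getCollection("foo.bar").count());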
diff --git a/jstests/sharding/bulk_insert.js b/jstests/sharding/bulk_insert.js
index 306c2a82020..715660fa67f 100644
--- a/jstests/sharding/bulk_insert.js
+++ b/jstests/sharding/bulk_insert.js
@@ -1,327 +1,285 @@
// Tests bulk inserts to mongos
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 2 });
+ var st = new ShardingTest({shards: 2, mongos: 2});
-var mongos = st.s;
-var staleMongos = st.s1;
-var config = mongos.getDB("config");
-var admin = mongos.getDB("admin");
-var shards = config.shards.find().toArray();
+ var mongos = st.s;
+ var staleMongos = st.s1;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var shards = config.shards.find().toArray();
-for (var i = 0; i < shards.length; i++) {
- shards[i].conn = new Mongo(shards[i].host);
-}
+ for (var i = 0; i < shards.length; i++) {
+ shards[i].conn = new Mongo(shards[i].host);
+ }
-var collSh = mongos.getCollection(jsTestName() + ".collSharded");
-var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
-var collDi = shards[0].conn.getCollection(jsTestName() + ".collDirect");
+ var collSh = mongos.getCollection(jsTestName() + ".collSharded");
+ var collUn = mongos.getCollection(jsTestName() + ".collUnsharded");
+ var collDi = shards[0].conn.getCollection(jsTestName() + ".collDirect");
-jsTest.log('Checking write to config collections...');
-assert.writeOK(admin.TestColl.insert({ SingleDoc: 1 }));
-assert.writeError(admin.TestColl.insert([ { Doc1: 1 }, { Doc2: 1 } ]));
+ jsTest.log('Checking write to config collections...');
+ assert.writeOK(admin.TestColl.insert({SingleDoc: 1}));
+ assert.writeError(admin.TestColl.insert([{Doc1: 1}, {Doc2: 1}]));
-jsTest.log("Setting up collections...");
+ jsTest.log("Setting up collections...");
-assert.commandWorked(admin.runCommand({ enableSharding : collSh.getDB() + "" }));
-st.ensurePrimaryShard(collSh.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({enableSharding: collSh.getDB() + ""}));
+ st.ensurePrimaryShard(collSh.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ movePrimary : collUn.getDB() + "",
- to : shards[1]._id}));
+ assert.commandWorked(admin.runCommand({movePrimary: collUn.getDB() + "", to: shards[1]._id}));
-printjson(collSh.ensureIndex({ukey : 1}, {unique : true}));
-printjson(collUn.ensureIndex({ukey : 1}, {unique : true}));
-printjson(collDi.ensureIndex({ukey : 1}, {unique : true}));
+ printjson(collSh.ensureIndex({ukey: 1}, {unique: true}));
+ printjson(collUn.ensureIndex({ukey: 1}, {unique: true}));
+ printjson(collDi.ensureIndex({ukey: 1}, {unique: true}));
-assert.commandWorked(admin.runCommand({ shardCollection : collSh + "",
- key : {ukey : 1} }));
-assert.commandWorked(admin.runCommand({ split : collSh + "",
- middle : {ukey : 0} }));
-assert.commandWorked(admin.runCommand({ moveChunk: collSh + "",
- find: { ukey: 0 },
- to: shards[0]._id,
- _waitForDelete: true }));
+ assert.commandWorked(admin.runCommand({shardCollection: collSh + "", key: {ukey: 1}}));
+ assert.commandWorked(admin.runCommand({split: collSh + "", middle: {ukey: 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true}));
-var resetColls = function() {
- assert.writeOK(collSh.remove({}));
- assert.writeOK(collUn.remove({}));
- assert.writeOK(collDi.remove({}));
-};
+ var resetColls = function() {
+ assert.writeOK(collSh.remove({}));
+ assert.writeOK(collUn.remove({}));
+ assert.writeOK(collDi.remove({}));
+ };
-var isDupKeyError = function(err) {
- return /dup key/.test(err + "");
-};
+ var isDupKeyError = function(err) {
+ return /dup key/.test(err + "");
+ };
-jsTest.log("Collections created.");
-st.printShardingStatus();
+ jsTest.log("Collections created.");
+ st.printShardingStatus();
-//
-// BREAK-ON-ERROR
-//
+ //
+ // BREAK-ON-ERROR
+ //
-jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
+ jsTest.log("Bulk insert (no ContinueOnError) to single shard...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 1}];
-assert.writeOK(collSh.insert(inserts));
-assert.eq(2, collSh.find().itcount());
+ assert.writeOK(collSh.insert(inserts));
+ assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
-assert.eq(2, collUn.find().itcount());
+ assert.writeOK(collUn.insert(inserts));
+ assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
-assert.eq(2, collDi.find().itcount());
+ assert.writeOK(collDi.insert(inserts));
+ assert.eq(2, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongos error...");
+ jsTest.log("Bulk insert (no COE) with mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {hello : "world"},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
-assert.writeError(collSh.insert(inserts));
-assert.eq(1, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(1, collSh.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongod error...");
+ jsTest.log("Bulk insert (no COE) with mongod error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 0},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
-assert.writeError(collSh.insert(inserts));
-assert.eq(1, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(1, collSh.find().itcount());
-assert.writeError(collUn.insert(inserts));
-assert.eq(1, collUn.find().itcount());
+ assert.writeError(collUn.insert(inserts));
+ assert.eq(1, collUn.find().itcount());
-assert.writeError(collDi.insert(inserts));
-assert.eq(1, collDi.find().itcount());
+ assert.writeError(collDi.insert(inserts));
+ assert.eq(1, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
+ jsTest.log("Bulk insert (no COE) with mongod and mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 0},
- {ukey : 1},
- {hello : "world"}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}, {hello: "world"}];
-var res = assert.writeError(collSh.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(1, collSh.find().itcount());
+ var res = assert.writeError(collSh.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(1, collSh.find().itcount());
-res = assert.writeError(collUn.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(1, collUn.find().itcount());
+ res = assert.writeError(collUn.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(1, collUn.find().itcount());
-res = assert.writeError(collDi.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(1, collDi.find().itcount());
+ res = assert.writeError(collDi.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(1, collDi.find().itcount());
-jsTest.log("Bulk insert (no COE) on second shard...");
+ jsTest.log("Bulk insert (no COE) on second shard...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : -1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: -1}];
-assert.writeOK(collSh.insert(inserts));
-assert.eq(2, collSh.find().itcount());
+ assert.writeOK(collSh.insert(inserts));
+ assert.eq(2, collSh.find().itcount());
-assert.writeOK(collUn.insert(inserts));
-assert.eq(2, collUn.find().itcount());
+ assert.writeOK(collUn.insert(inserts));
+ assert.eq(2, collUn.find().itcount());
-assert.writeOK(collDi.insert(inserts));
-assert.eq(2, collDi.find().itcount());
+ assert.writeOK(collDi.insert(inserts));
+ assert.eq(2, collDi.find().itcount());
-jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
+ jsTest.log("Bulk insert to second shard (no COE) with mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1}, // switches shards
- {ukey : -1},
- {hello : "world"}];
+ resetColls();
+ var inserts = [
+ {ukey: 0},
+ {ukey: 1}, // switches shards
+ {ukey: -1},
+ {hello: "world"}
+ ];
-assert.writeError(collSh.insert(inserts));
-assert.eq(3, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(3, collSh.find().itcount());
-jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
+ jsTest.log("Bulk insert to second shard (no COE) with mongod error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -1},
- {ukey : -2},
- {ukey : -2}];
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 1}, {ukey: -1}, {ukey: -2}, {ukey: -2}];
-assert.writeError(collSh.insert(inserts));
-assert.eq(4, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts));
+ assert.eq(4, collSh.find().itcount());
-assert.writeError(collUn.insert(inserts));
-assert.eq(4, collUn.find().itcount());
+ assert.writeError(collUn.insert(inserts));
+ assert.eq(4, collUn.find().itcount());
-assert.writeError(collDi.insert(inserts));
-assert.eq(4, collDi.find().itcount());
+ assert.writeError(collDi.insert(inserts));
+ assert.eq(4, collDi.find().itcount());
-jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
+ jsTest.log("Bulk insert to third shard (no COE) with mongod and mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -2},
- {ukey : -3},
- {ukey : 4},
- {ukey : 4},
- {hello : "world"}];
+ resetColls();
+ var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
-res = assert.writeError(collSh.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(5, collSh.find().itcount());
+ res = assert.writeError(collSh.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(5, collSh.find().itcount());
-res = assert.writeError(collUn.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(5, collUn.find().itcount());
+ res = assert.writeError(collUn.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(5, collUn.find().itcount());
-res = assert.writeError(collDi.insert(inserts));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(5, collDi.find().itcount());
+ res = assert.writeError(collDi.insert(inserts));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(5, collDi.find().itcount());
-//
-// CONTINUE-ON-ERROR
-//
+ //
+ // CONTINUE-ON-ERROR
+ //
-jsTest.log("Bulk insert (yes COE) with mongos error...");
+ jsTest.log("Bulk insert (yes COE) with mongos error...");
-resetColls();
-var inserts = [{ukey : 0},
- {hello : "world"},
- {ukey : 1}];
+ resetColls();
+ var inserts = [{ukey: 0}, {hello: "world"}, {ukey: 1}];
-assert.writeError(collSh.insert(inserts, 1)); // COE
-assert.eq(2, collSh.find().itcount());
+ assert.writeError(collSh.insert(inserts, 1)); // COE
+ assert.eq(2, collSh.find().itcount());
-jsTest.log("Bulk insert (yes COE) with mongod error...");
-
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 0},
- {ukey : 1}];
+ jsTest.log("Bulk insert (yes COE) with mongod error...");
-assert.writeError(collSh.insert(inserts, 1));
-assert.eq(2, collSh.find().itcount());
+ resetColls();
+ var inserts = [{ukey: 0}, {ukey: 0}, {ukey: 1}];
-assert.writeError(collUn.insert(inserts, 1));
-assert.eq(2, collUn.find().itcount());
-
-assert.writeError(collDi.insert(inserts, 1));
-assert.eq(2, collDi.find().itcount());
-
-jsTest
- .log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
-
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -2},
- {ukey : -3},
- {ukey : 4},
- {ukey : 4},
- {hello : "world"}];
-
-// Last error here is mongos error
-res = assert.writeError(collSh.insert(inserts, 1));
-assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
-assert.eq(5, collSh.find().itcount());
-
-// Extra insert goes through, since mongos error "doesn't count"
-res = assert.writeError(collUn.insert(inserts, 1));
-assert.eq(6, res.nInserted, res.toString());
-assert.eq(6, collUn.find().itcount());
-
-res = assert.writeError(collDi.insert(inserts, 1));
-assert.eq(6, res.nInserted, res.toString());
-assert.eq(6, collDi.find().itcount());
-
-jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error "
- + "(mongos error first)...");
-
-resetColls();
-var inserts = [{ukey : 0},
- {ukey : 1},
- {ukey : -2},
- {ukey : -3},
- {hello : "world"},
- {ukey : 4},
- {ukey : 4}];
-
-// Last error here is mongos error
-res = assert.writeError(collSh.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
-assert.eq(5, collSh.find().itcount());
-
-// Extra insert goes through, since mongos error "doesn't count"
-res = assert.writeError(collUn.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
-assert.eq(6, collUn.find().itcount());
-
-res = assert.writeError(collDi.insert(inserts, 1));
-assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
-assert.eq(6, collDi.find().itcount());
-
-//
-// Test when WBL has to be invoked mid-insert
-//
-
-jsTest.log("Testing bulk insert (no COE) with WBL...");
-resetColls();
-
-var inserts = [{ukey : 1},
- {ukey : -1}];
-
-var staleCollSh = staleMongos.getCollection(collSh + "");
-assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
-
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[1]._id,
- _waitForDelete: true }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[0]._id,
- _waitForDelete: true}));
-
-assert.writeOK(staleCollSh.insert(inserts));
-
-//
-// Test when the objects to be bulk inserted are 10MB, and so can't be inserted
-// together with WBL.
-//
-
-jsTest.log("Testing bulk insert (no COE) with WBL and large objects...");
-resetColls();
-
-var data10MB = 'x'.repeat(10 * 1024 * 1024);
-var inserts = [{ukey : 1, data : data10MB},
- {ukey : 2, data : data10MB},
- {ukey : -1, data : data10MB},
- {ukey : -2, data : data10MB}];
-
-staleCollSh = staleMongos.getCollection(collSh + "");
-assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
-
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[1]._id,
- _waitForDelete: true }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSh + "",
- find : {ukey : 0},
- to : shards[0]._id,
- _waitForDelete: true }));
-
-assert.writeOK(staleCollSh.insert(inserts));
-
-st.stop();
+ assert.writeError(collSh.insert(inserts, 1));
+ assert.eq(2, collSh.find().itcount());
+
+ assert.writeError(collUn.insert(inserts, 1));
+ assert.eq(2, collUn.find().itcount());
+
+ assert.writeError(collDi.insert(inserts, 1));
+ assert.eq(2, collDi.find().itcount());
+
+ jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error...");
+
+ resetColls();
+ var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {ukey: 4}, {ukey: 4}, {hello: "world"}];
+
+ // Last error here is mongos error
+ res = assert.writeError(collSh.insert(inserts, 1));
+ assert(!isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg),
+ res.toString());
+ assert.eq(5, collSh.find().itcount());
+
+ // Extra insert goes through, since mongos error "doesn't count"
+ res = assert.writeError(collUn.insert(inserts, 1));
+ assert.eq(6, res.nInserted, res.toString());
+ assert.eq(6, collUn.find().itcount());
+
+ res = assert.writeError(collDi.insert(inserts, 1));
+ assert.eq(6, res.nInserted, res.toString());
+ assert.eq(6, collDi.find().itcount());
+
+ jsTest.log("Bulk insert to third shard (yes COE) with mongod and mongos error " +
+ "(mongos error first)...");
+
+ resetColls();
+ var inserts =
+ [{ukey: 0}, {ukey: 1}, {ukey: -2}, {ukey: -3}, {hello: "world"}, {ukey: 4}, {ukey: 4}];
+
+ // Last error here is mongos error
+ res = assert.writeError(collSh.insert(inserts, 1));
+ assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+ assert.eq(5, collSh.find().itcount());
+
+ // Extra insert goes through, since mongos error "doesn't count"
+ res = assert.writeError(collUn.insert(inserts, 1));
+ assert(isDupKeyError(res.getWriteErrorAt(res.getWriteErrorCount() - 1).errmsg), res.toString());
+ assert.eq(6, collUn.find().itcount());
+
+ res = assert.writeError(collDi.insert(inserts, 1));
+ assert(isDupKeyError(res.getWriteErrorAt(0).errmsg), res.toString());
+ assert.eq(6, collDi.find().itcount());
+
+ //
+ // Test when WBL has to be invoked mid-insert
+ //
+
+ jsTest.log("Testing bulk insert (no COE) with WBL...");
+ resetColls();
+
+ var inserts = [{ukey: 1}, {ukey: -1}];
+
+ var staleCollSh = staleMongos.getCollection(collSh + "");
+ assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true}));
+
+ assert.writeOK(staleCollSh.insert(inserts));
+
+ //
+ // Test when the objects to be bulk inserted are 10MB, and so can't be inserted
+ // together with WBL.
+ //
+
+ jsTest.log("Testing bulk insert (no COE) with WBL and large objects...");
+ resetColls();
+
+ var data10MB = 'x'.repeat(10 * 1024 * 1024);
+ var inserts = [
+ {ukey: 1, data: data10MB},
+ {ukey: 2, data: data10MB},
+ {ukey: -1, data: data10MB},
+ {ukey: -2, data: data10MB}
+ ];
+
+ staleCollSh = staleMongos.getCollection(collSh + "");
+ assert.eq(null, staleCollSh.findOne(), 'Collections should be empty');
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSh + "", find: {ukey: 0}, to: shards[0]._id, _waitForDelete: true}));
+
+ assert.writeOK(staleCollSh.insert(inserts));
+
+ st.stop();
})();
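The second argument passed to insert() above (the value 1) is the legacy ContinueOnError flag; the same behaviour is expressed by the shell's ordered option. A hedged sketch against a throwaway namespace (the database and collection names are illustrative):

    // Sketch: an unordered bulk insert keeps going past a duplicate-key error,
    // while an ordered one would stop at the first failure.
    var sketchColl = db.getSiblingDB("bulkInsertSketch").coll;  // illustrative namespace
    sketchColl.drop();
    sketchColl.ensureIndex({ukey: 1}, {unique: true});
    var res = sketchColl.insert([{ukey: 0}, {ukey: 0}, {ukey: 1}], {ordered: false});
    assert.eq(2, res.nInserted, tojson(res));             // both non-duplicate documents land
    assert.eq(1, res.getWriteErrorCount(), tojson(res));  // one duplicate-key error reported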
diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js
index 7d42d8b41a2..943fe270ba0 100644
--- a/jstests/sharding/bulk_shard_insert.js
+++ b/jstests/sharding/bulk_shard_insert.js
@@ -1,81 +1,81 @@
// Test bulk inserts with sharding
(function() {
-// Setup randomized test
-var seed = new Date().getTime();
-// seed = 0
+ // Setup randomized test
+ var seed = new Date().getTime();
+ // seed = 0
-Random.srand( seed );
-print( "Seeded with " + seed );
+ Random.srand(seed);
+ print("Seeded with " + seed);
-var st = new ShardingTest({ name : jsTestName(), shards : 4, chunkSize: 1 });
+ var st = new ShardingTest({name: jsTestName(), shards: 4, chunkSize: 1});
-// Setup sharded collection
-var mongos = st.s0;
-var db = mongos.getDB( jsTestName() );
-var coll = db.coll;
-st.shardColl( coll, { _id : 1 }, false );
+ // Setup sharded collection
+ var mongos = st.s0;
+ var db = mongos.getDB(jsTestName());
+ var coll = db.coll;
+ st.shardColl(coll, {_id: 1}, false);
-// Insert lots of bulk documents
-var numDocs = 1000000;
+ // Insert lots of bulk documents
+ var numDocs = 1000000;
-var bulkSize = Math.floor( Random.rand() * 1000 ) + 2;
-bulkSize = 4000;
-var docSize = 128; /* bytes */
-print( "\n\n\nBulk size is " + bulkSize );
+ var bulkSize = Math.floor(Random.rand() * 1000) + 2;
+ bulkSize = 4000;
+ var docSize = 128; /* bytes */
+ print("\n\n\nBulk size is " + bulkSize);
-var data = "x";
-while( Object.bsonsize({ x : data }) < docSize ){
- data += data;
-}
+ var data = "x";
+ while (Object.bsonsize({x: data}) < docSize) {
+ data += data;
+ }
-print( "\n\n\nDocument size is " + Object.bsonsize({ x : data }) );
+ print("\n\n\nDocument size is " + Object.bsonsize({x: data}));
-var docsInserted = 0;
-var balancerOn = false;
+ var docsInserted = 0;
+ var balancerOn = false;
-while (docsInserted < numDocs) {
- var currBulkSize = ( numDocs - docsInserted > bulkSize ) ? bulkSize : ( numDocs - docsInserted );
-
- var bulk = [];
- for( var i = 0; i < currBulkSize; i++ ){
- bulk.push({hi: "there", at: docsInserted, i: i, x: data});
- }
-
- assert.writeOK(coll.insert( bulk ));
-
- if( Math.floor( docsInserted / 10000 ) != Math.floor( ( docsInserted + currBulkSize ) / 10000 ) ){
- print( "Inserted " + (docsInserted + currBulkSize) + " documents." );
- st.printShardingStatus();
- }
-
- docsInserted += currBulkSize;
-
- if( docsInserted > numDocs / 2 && ! balancerOn ){
- print( "Turning on balancer after half documents inserted." );
- st.startBalancer();
- balancerOn = true;
- }
-}
+ while (docsInserted < numDocs) {
+ var currBulkSize =
+ (numDocs - docsInserted > bulkSize) ? bulkSize : (numDocs - docsInserted);
+
+ var bulk = [];
+ for (var i = 0; i < currBulkSize; i++) {
+ bulk.push({hi: "there", at: docsInserted, i: i, x: data});
+ }
-// Check we inserted all the documents
-st.printShardingStatus();
+ assert.writeOK(coll.insert(bulk));
+
+ if (Math.floor(docsInserted / 10000) != Math.floor((docsInserted + currBulkSize) / 10000)) {
+ print("Inserted " + (docsInserted + currBulkSize) + " documents.");
+ st.printShardingStatus();
+ }
+
+ docsInserted += currBulkSize;
+
+ if (docsInserted > numDocs / 2 && !balancerOn) {
+ print("Turning on balancer after half documents inserted.");
+ st.startBalancer();
+ balancerOn = true;
+ }
+ }
-var count = coll.find().count();
-var itcount = count; //coll.find().itcount()
+ // Check we inserted all the documents
+ st.printShardingStatus();
-print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
+ var count = coll.find().count();
+ var itcount = count; // coll.find().itcount()
-st.startBalancer();
+ print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-var count = coll.find().count();
-var itcount = coll.find().itcount();
+ st.startBalancer();
-print( "Inserted " + docsInserted + " count : " + count + " itcount : " + itcount );
+ var count = coll.find().count();
+ var itcount = coll.find().itcount();
+ print("Inserted " + docsInserted + " count : " + count + " itcount : " + itcount);
-// SERVER-3645
-// assert.eq( docsInserted, count )
-assert.eq(docsInserted, itcount);
+ // SERVER-3645
+ // assert.eq( docsInserted, count )
+ assert.eq(docsInserted, itcount);
})();
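The padding loop in this test doubles the string until Object.bsonsize reaches the 128-byte target, so the documents it builds can overshoot that target by close to a factor of two. A standalone sketch that prints how large the padded document actually ends up:

    // Sketch: same doubling strategy as the test; shows the overshoot past the target size.
    var targetSize = 128;  // bytes, matching docSize in the test above
    var padding = "x";
    while (Object.bsonsize({x: padding}) < targetSize) {
        padding += padding;  // 1, 2, 4, 8, ... characters
    }
    print("target " + targetSize + " bytes, actual " + Object.bsonsize({x: padding}) + " bytes");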
diff --git a/jstests/sharding/cleanup_orphaned.js b/jstests/sharding/cleanup_orphaned.js
index bbe383b94ce..a63991f7a23 100644
--- a/jstests/sharding/cleanup_orphaned.js
+++ b/jstests/sharding/cleanup_orphaned.js
@@ -9,7 +9,9 @@ testCleanupOrphaned({
shardKey: {_id: 1},
keyGen: function() {
var ids = [];
- for (var i = -50; i < 50; i++) { ids.push({_id: i}); }
+ for (var i = -50; i < 50; i++) {
+ ids.push({_id: i});
+ }
return ids;
}
});
diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js
index e1364f4ad12..0b50742ad70 100644
--- a/jstests/sharding/cleanup_orphaned_auth.js
+++ b/jstests/sharding/cleanup_orphaned_auth.js
@@ -2,58 +2,55 @@
// Tests of cleanupOrphaned command permissions.
//
-(function() {
-"use strict";
+(function() {
+ "use strict";
-function assertUnauthorized(res, msg){
- if (assert._debug && msg) print("in assert for: " + msg);
+ function assertUnauthorized(res, msg) {
+ if (assert._debug && msg)
+ print("in assert for: " + msg);
- if (res.ok == 0 && res.errmsg.startsWith('not authorized'))
- return;
+ if (res.ok == 0 && res.errmsg.startsWith('not authorized'))
+ return;
- var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
- if (msg) { finalMsg += " : " + msg; }
- doassert(finalMsg);
-}
+ var finalMsg = "command worked when it should have been unauthorized: " + tojson(res);
+ if (msg) {
+ finalMsg += " : " + msg;
+ }
+ doassert(finalMsg);
+ }
-var st = new ShardingTest({
- auth: true,
- keyFile: 'jstests/libs/key1',
- other: {useHostname: false}
-});
+ var st =
+ new ShardingTest({auth: true, keyFile: 'jstests/libs/key1', other: {useHostname: false}});
-var shardAdmin = st.shard0.getDB('admin');
-shardAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
-shardAdmin.auth('admin', 'x');
+ var shardAdmin = st.shard0.getDB('admin');
+ shardAdmin.createUser(
+ {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+ shardAdmin.auth('admin', 'x');
-var mongos = st.s0;
-var mongosAdmin = mongos.getDB('admin');
-var coll = mongos.getCollection('foo.bar');
+ var mongos = st.s0;
+ var mongosAdmin = mongos.getDB('admin');
+ var coll = mongos.getCollection('foo.bar');
-mongosAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
-mongosAdmin.auth('admin', 'x');
+ mongosAdmin.createUser(
+ {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']});
+ mongosAdmin.auth('admin', 'x');
-assert.commandWorked(mongosAdmin.runCommand({
- enableSharding: coll.getDB().getName()
-}));
+ assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
-assert.commandWorked(mongosAdmin.runCommand({
- shardCollection: coll.getFullName(),
- key: {_id: 'hashed'}
-}));
+ assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}}));
+ // cleanupOrphaned requires auth as admin user.
+ assert.commandWorked(shardAdmin.logout());
+ assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
-// cleanupOrphaned requires auth as admin user.
-assert.commandWorked(shardAdmin.logout());
-assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
+ var fooDB = st.shard0.getDB('foo');
+ shardAdmin.auth('admin', 'x');
+ fooDB.createUser({user: 'user', pwd: 'x', roles: ['readWrite', 'dbAdmin']});
+ shardAdmin.logout();
+ fooDB.auth('user', 'x');
+ assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
-var fooDB = st.shard0.getDB('foo');
-shardAdmin.auth('admin', 'x');
-fooDB.createUser({user:'user', pwd:'x', roles:['readWrite', 'dbAdmin']});
-shardAdmin.logout();
-fooDB.auth('user', 'x');
-assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'}));
-
-st.stop();
+ st.stop();
})();
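The authorization checks above boil down to one rule: cleanupOrphaned must be run on a shard's admin database by a user with cluster administration privileges. A hedged sketch of an authorized invocation (host and credentials are placeholders):

    // Sketch: authenticate as a cluster administrator on the shard, then run the command.
    var shardAdminDB = new Mongo("localhost:27018").getDB("admin");  // placeholder shard address
    shardAdminDB.auth("admin", "x");                                 // e.g. a clusterAdmin user
    printjson(shardAdminDB.runCommand({cleanupOrphaned: "foo.bar"}));
    shardAdminDB.logout();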
diff --git a/jstests/sharding/cleanup_orphaned_basic.js b/jstests/sharding/cleanup_orphaned_basic.js
index 387863ee75d..3ed9015941b 100644
--- a/jstests/sharding/cleanup_orphaned_basic.js
+++ b/jstests/sharding/cleanup_orphaned_basic.js
@@ -3,142 +3,112 @@
// command fail.
//
-(function() {
-"use strict";
-
-/*****************************************************************************
- * Unsharded mongod.
- ****************************************************************************/
-
-// cleanupOrphaned fails against unsharded mongod.
-var mongod = MongoRunner.runMongod();
-assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
-
-/*****************************************************************************
- * Bad invocations of cleanupOrphaned command.
- ****************************************************************************/
-
-var st = new ShardingTest({
- other: {
- rs: true,
- rsOptions: {nodes: 2}
- }
-});
-
-var mongos = st.s0;
-var shards = mongos.getCollection('config.shards').find().toArray();
-var mongosAdmin = mongos.getDB('admin');
-var dbName = 'foo';
-var collectionName = 'bar';
-var ns = dbName + '.' + collectionName;
-var coll = mongos.getCollection(ns);
-
-// cleanupOrphaned fails against mongos ('no such command'): it must be run
-// on mongod.
-assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
-
-// cleanupOrphaned must be run on admin DB.
-var shardFooDB = st.shard0.getDB(dbName);
-assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
-
-// Must be run on primary.
-var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
-var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
-print('cleanupOrphaned on secondary:');
-printjson(response);
-assert.commandFailed(response);
-
-var shardAdmin = st.shard0.getDB('admin');
-var badNS = ' \\/."*<>:|?';
-assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
-
-// cleanupOrphaned works on sharded collection.
-assert.commandWorked(mongosAdmin.runCommand({
- enableSharding: coll.getDB().getName()
-}));
-
-st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
-
-assert.commandWorked(mongosAdmin.runCommand({
- shardCollection: ns,
- key: {_id: 1}
-}));
-
-assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
-
-/*****************************************************************************
- * Empty shard.
- ****************************************************************************/
-
-// Ping shard[1] so it will be aware that it is sharded. Otherwise cleanupOrphaned
-// may fail.
-assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: shards[1]._id
-}));
-
-assert.commandWorked(mongosAdmin.runCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: shards[0]._id
-}));
-
-// Collection's home is shard0, there are no chunks assigned to shard1.
-st.shard1.getCollection(ns).insert({});
-assert.eq(null, st.shard1.getDB(dbName).getLastError());
-assert.eq(1, st.shard1.getCollection(ns).count());
-response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
-assert.commandWorked(response);
-assert.eq({_id: {$maxKey:1}}, response.stoppedAtKey);
-assert.eq(
- 0, st.shard1.getCollection(ns).count(),
- "cleanupOrphaned didn't delete orphan on empty shard.");
-
-/*****************************************************************************
- * Bad startingFromKeys.
- ****************************************************************************/
-
-// startingFromKey of MaxKey.
-response = shardAdmin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: {_id: MaxKey}
-});
-assert.commandWorked(response);
-assert.eq(null, response.stoppedAtKey);
-
-// startingFromKey doesn't match number of fields in shard key.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: {someKey: 'someValue', someOtherKey: 1}
-}));
-
-// startingFromKey matches number of fields in shard key but not field names.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: ns,
- startingFromKey: {someKey: 'someValue'}
-}));
-
-var coll2 = mongos.getCollection('foo.baz');
-
-assert.commandWorked(mongosAdmin.runCommand({
- shardCollection: coll2.getFullName(),
- key: {a:1, b:1}
-}));
-
-
-// startingFromKey doesn't match number of fields in shard key.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: coll2.getFullName(),
- startingFromKey: {someKey: 'someValue'}
-}));
-
-// startingFromKey matches number of fields in shard key but not field names.
-assert.commandFailed(shardAdmin.runCommand({
- cleanupOrphaned: coll2.getFullName(),
- startingFromKey: {a: 'someValue', c: 1}
-}));
-
-st.stop();
+(function() {
+ "use strict";
+
+ /*****************************************************************************
+ * Unsharded mongod.
+ ****************************************************************************/
+
+ // cleanupOrphaned fails against unsharded mongod.
+ var mongod = MongoRunner.runMongod();
+ assert.commandFailed(mongod.getDB('admin').runCommand({cleanupOrphaned: 'foo.bar'}));
+
+ /*****************************************************************************
+ * Bad invocations of cleanupOrphaned command.
+ ****************************************************************************/
+
+ var st = new ShardingTest({other: {rs: true, rsOptions: {nodes: 2}}});
+
+ var mongos = st.s0;
+ var shards = mongos.getCollection('config.shards').find().toArray();
+ var mongosAdmin = mongos.getDB('admin');
+ var dbName = 'foo';
+ var collectionName = 'bar';
+ var ns = dbName + '.' + collectionName;
+ var coll = mongos.getCollection(ns);
+
+ // cleanupOrphaned fails against mongos ('no such command'): it must be run
+ // on mongod.
+ assert.commandFailed(mongosAdmin.runCommand({cleanupOrphaned: ns}));
+
+ // cleanupOrphaned must be run on admin DB.
+ var shardFooDB = st.shard0.getDB(dbName);
+ assert.commandFailed(shardFooDB.runCommand({cleanupOrphaned: ns}));
+
+ // Must be run on primary.
+ var secondaryAdmin = st.rs0.getSecondary().getDB('admin');
+ var response = secondaryAdmin.runCommand({cleanupOrphaned: ns});
+ print('cleanupOrphaned on secondary:');
+ printjson(response);
+ assert.commandFailed(response);
+
+ var shardAdmin = st.shard0.getDB('admin');
+ var badNS = ' \\/."*<>:|?';
+ assert.commandFailed(shardAdmin.runCommand({cleanupOrphaned: badNS}));
+
+ // cleanupOrphaned works on sharded collection.
+ assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()}));
+
+ st.ensurePrimaryShard(coll.getDB().getName(), shards[0]._id);
+
+ assert.commandWorked(mongosAdmin.runCommand({shardCollection: ns, key: {_id: 1}}));
+
+ assert.commandWorked(shardAdmin.runCommand({cleanupOrphaned: ns}));
+
+ /*****************************************************************************
+ * Empty shard.
+ ****************************************************************************/
+
+ // Ping shard[1] so it will be aware that it is sharded. Otherwise cleanupOrphaned
+ // may fail.
+ assert.commandWorked(mongosAdmin.runCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 1}, to: shards[1]._id}));
+
+ assert.commandWorked(mongosAdmin.runCommand(
+ {moveChunk: coll.getFullName(), find: {_id: 1}, to: shards[0]._id}));
+
+ // Collection's home is shard0, there are no chunks assigned to shard1.
+ st.shard1.getCollection(ns).insert({});
+ assert.eq(null, st.shard1.getDB(dbName).getLastError());
+ assert.eq(1, st.shard1.getCollection(ns).count());
+ response = st.shard1.getDB('admin').runCommand({cleanupOrphaned: ns});
+ assert.commandWorked(response);
+ assert.eq({_id: {$maxKey: 1}}, response.stoppedAtKey);
+ assert.eq(0,
+ st.shard1.getCollection(ns).count(),
+ "cleanupOrphaned didn't delete orphan on empty shard.");
+
+ /*****************************************************************************
+ * Bad startingFromKeys.
+ ****************************************************************************/
+
+ // startingFromKey of MaxKey.
+ response = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {_id: MaxKey}});
+ assert.commandWorked(response);
+ assert.eq(null, response.stoppedAtKey);
+
+ // startingFromKey doesn't match number of fields in shard key.
+ assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue', someOtherKey: 1}}));
+
+ // startingFromKey matches number of fields in shard key but not field names.
+ assert.commandFailed(
+ shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: {someKey: 'someValue'}}));
+
+ var coll2 = mongos.getCollection('foo.baz');
+
+ assert.commandWorked(
+ mongosAdmin.runCommand({shardCollection: coll2.getFullName(), key: {a: 1, b: 1}}));
+
+ // startingFromKey doesn't match number of fields in shard key.
+ assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {someKey: 'someValue'}}));
+
+ // startingFromKey matches number of fields in shard key but not field names.
+ assert.commandFailed(shardAdmin.runCommand(
+ {cleanupOrphaned: coll2.getFullName(), startingFromKey: {a: 'someValue', c: 1}}));
+
+ st.stop();
})();
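The startingFromKey failures above come from one validation rule: the key document must use exactly the field names of the collection's shard key. A hedged sketch of a well-formed, resumable call for the {a: 1, b: 1} key (the connection is a placeholder for a shard primary):

    // Sketch: startingFromKey must mirror the shard key's field names; stoppedAtKey
    // can be passed back as the next startingFromKey to continue the scan.
    var shardPrimary = new Mongo("localhost:27018");  // placeholder shard primary address
    var res = shardPrimary.getDB("admin").runCommand(
        {cleanupOrphaned: "foo.baz", startingFromKey: {a: MinKey, b: MinKey}});
    assert.commandWorked(res);
    printjson(res.stoppedAtKey);  // where the scan stopped; feed it back in to resume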
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
index 1948a4bed5c..db8b6d22010 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk.js
@@ -1,162 +1,150 @@
//
// Tests cleanupOrphaned concurrent with moveChunk.
-// Inserts orphan documents to the donor and recipient shards during the moveChunk and
+// Inserts orphan documents to the donor and recipient shards during the moveChunk and
// verifies that cleanupOrphaned removes orphans.
//
load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
-(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-var st = new ShardingTest({shards: 2, other: { separateConfig: true }});
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = 'foo',
- ns = dbName + '.bar',
- coll = mongos.getCollection(ns),
- donor = st.shard0,
- recipient = st.shard1,
- donorColl = donor.getCollection(ns),
- recipientColl = st.shard1.getCollection(ns);
-
-// Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
-// Donor: [minKey, 0) [0, 20)
-// Recipient: [20, maxKey)
-assert.commandWorked( admin.runCommand({enableSharding: dbName}) );
-printjson( admin.runCommand({movePrimary: dbName, to: shards[0]._id}) );
-assert.commandWorked( admin.runCommand({shardCollection: ns, key: {_id: 1}}) );
-assert.commandWorked( admin.runCommand({split: ns, middle: {_id: 0}}) );
-assert.commandWorked( admin.runCommand({split: ns, middle: {_id: 20}}) );
-assert.commandWorked( admin.runCommand({moveChunk: ns,
- find: {_id: 20},
- to: shards[1]._id,
- _waitForDelete: true}) );
-
-jsTest.log('Inserting 40 docs into shard 0....');
-for (var i = -20; i < 20; i += 2) coll.insert({_id: i});
-assert.eq(null, coll.getDB().getLastError());
-assert.eq(20, donorColl.count());
-
-jsTest.log('Inserting 25 docs into shard 1....');
-for (i = 20; i < 40; i += 2) coll.insert({_id: i});
-assert.eq(null, coll.getDB().getLastError());
-assert.eq(10, recipientColl.count());
-
-//
-// Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
-// from shard 0 to shard 1. Pause it at some points in the donor's and
-// recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
-//
-
-jsTest.log('setting failpoint startedMoveChunk');
-pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- {_id: 0},
- null,
- coll.getFullName(),
- shards[1]._id);
-
-waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
-waitForMigrateStep(recipient, migrateStepNames.cloned);
-// Recipient has run _recvChunkStart and begun its migration thread; docs have
-// been cloned and chunk [0, 20) is noted as 'pending' on recipient.
-
-// Donor: [minKey, 0) [0, 20)
-// Recipient (pending): [0, 20)
-// Recipient: [20, maxKey)
-
-// Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
-//
-// Donor: [minKey, 0) [0, 20) {26}
-// Recipient (pending): [0, 20)
-// Recipient: {-1} [20, maxKey)
-donorColl.insert([{_id: 26}]);
-assert.eq(null, donorColl.getDB().getLastError());
-assert.eq(21, donorColl.count());
-recipientColl.insert([{_id: -1}]);
-assert.eq(null, recipientColl.getDB().getLastError());
-assert.eq(21, recipientColl.count());
-
-cleanupOrphaned(donor, ns, 2);
-assert.eq(20, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(20, recipientColl.count());
-
-jsTest.log('Inserting document on donor side');
-// Inserted a new document (not an orphan) with id 19, which belongs in the
-// [0, 20) chunk.
-donorColl.insert({_id: 19});
-assert.eq(null, coll.getDB().getLastError());
-assert.eq(21, donorColl.count());
-
-// Recipient transfers this modification.
-jsTest.log('Let migrate proceed to transferredMods');
-pauseMigrateAtStep(recipient, migrateStepNames.catchup);
-unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
-waitForMigrateStep(recipient, migrateStepNames.catchup);
-jsTest.log('Done letting migrate proceed to transferredMods');
-
-assert.eq(
- 21, recipientColl.count(),
- "Recipient didn't transfer inserted document.");
-
-cleanupOrphaned(donor, ns, 2);
-assert.eq(21, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(21, recipientColl.count());
-
-// Create orphans.
-donorColl.insert([{_id: 26}]);
-assert.eq(null, donorColl.getDB().getLastError());
-assert.eq(22, donorColl.count());
-recipientColl.insert([{_id: -1}]);
-assert.eq(null, recipientColl.getDB().getLastError());
-assert.eq(22, recipientColl.count());
-
-cleanupOrphaned(donor, ns, 2);
-assert.eq(21, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(21, recipientColl.count());
-
-// Recipient has been waiting for donor to call _recvChunkCommit.
-pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-proceedToMigrateStep(recipient, migrateStepNames.steady);
-proceedToMigrateStep(recipient, migrateStepNames.done);
-
-// Create orphans.
-donorColl.insert([{_id: 26}]);
-assert.eq(null, donorColl.getDB().getLastError());
-assert.eq(22, donorColl.count());
-recipientColl.insert([{_id: -1}]);
-assert.eq(null, recipientColl.getDB().getLastError());
-assert.eq(22, recipientColl.count());
-
-// cleanupOrphaned should still fail on donor, but should work on the recipient
-cleanupOrphaned(donor, ns, 2);
-assert.eq(10, donorColl.count());
-cleanupOrphaned(recipient, ns, 2);
-assert.eq(21, recipientColl.count());
-
-// Let migration thread complete.
-unpauseMigrateAtStep(recipient, migrateStepNames.done);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-joinMoveChunk();
-
-// Donor has finished post-move delete.
-cleanupOrphaned(donor, ns, 2); // this is necessary for the count to not be 11
-assert.eq(10, donorColl.count());
-assert.eq(21, recipientColl.count());
-assert.eq(31, coll.count());
-
-st.stop();
+(function() {
+ "use strict";
+
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+ var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = 'foo',
+ ns = dbName + '.bar', coll = mongos.getCollection(ns), donor = st.shard0,
+ recipient = st.shard1, donorColl = donor.getCollection(ns),
+ recipientColl = st.shard1.getCollection(ns);
+
+ // Three chunks of 10 documents each, with ids -20, -18, -16, ..., 38.
+ // Donor: [minKey, 0) [0, 20)
+ // Recipient: [20, maxKey)
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 20}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 20}, to: shards[1]._id, _waitForDelete: true}));
+
+    jsTest.log('Inserting 20 docs into shard 0....');
+ for (var i = -20; i < 20; i += 2)
+ coll.insert({_id: i});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.eq(20, donorColl.count());
+
+    jsTest.log('Inserting 10 docs into shard 1....');
+ for (i = 20; i < 40; i += 2)
+ coll.insert({_id: i});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.eq(10, recipientColl.count());
+
+ //
+ // Start a moveChunk in the background. Move chunk [0, 20), which has 10 docs,
+ // from shard 0 to shard 1. Pause it at some points in the donor's and
+ // recipient's work flows, and test cleanupOrphaned on shard 0 and shard 1.
+ //
+
+ jsTest.log('setting failpoint startedMoveChunk');
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), shards[1]._id);
+
+ waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+ waitForMigrateStep(recipient, migrateStepNames.cloned);
+ // Recipient has run _recvChunkStart and begun its migration thread; docs have
+ // been cloned and chunk [0, 20) is noted as 'pending' on recipient.
+
+ // Donor: [minKey, 0) [0, 20)
+ // Recipient (pending): [0, 20)
+ // Recipient: [20, maxKey)
+
+ // Create orphans. I'll show an orphaned doc on donor with _id 26 like {26}:
+ //
+ // Donor: [minKey, 0) [0, 20) {26}
+ // Recipient (pending): [0, 20)
+ // Recipient: {-1} [20, maxKey)
+ donorColl.insert([{_id: 26}]);
+ assert.eq(null, donorColl.getDB().getLastError());
+ assert.eq(21, donorColl.count());
+ recipientColl.insert([{_id: -1}]);
+ assert.eq(null, recipientColl.getDB().getLastError());
+ assert.eq(21, recipientColl.count());
+
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(20, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(20, recipientColl.count());
+
+ jsTest.log('Inserting document on donor side');
+ // Inserted a new document (not an orphan) with id 19, which belongs in the
+ // [0, 20) chunk.
+ donorColl.insert({_id: 19});
+ assert.eq(null, coll.getDB().getLastError());
+ assert.eq(21, donorColl.count());
+
+ // Recipient transfers this modification.
+ jsTest.log('Let migrate proceed to transferredMods');
+ pauseMigrateAtStep(recipient, migrateStepNames.catchup);
+ unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ waitForMigrateStep(recipient, migrateStepNames.catchup);
+ jsTest.log('Done letting migrate proceed to transferredMods');
+
+ assert.eq(21, recipientColl.count(), "Recipient didn't transfer inserted document.");
+
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(21, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(21, recipientColl.count());
+
+ // Create orphans.
+ donorColl.insert([{_id: 26}]);
+ assert.eq(null, donorColl.getDB().getLastError());
+ assert.eq(22, donorColl.count());
+ recipientColl.insert([{_id: -1}]);
+ assert.eq(null, recipientColl.getDB().getLastError());
+ assert.eq(22, recipientColl.count());
+
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(21, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(21, recipientColl.count());
+
+ // Recipient has been waiting for donor to call _recvChunkCommit.
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ proceedToMigrateStep(recipient, migrateStepNames.steady);
+ proceedToMigrateStep(recipient, migrateStepNames.done);
+
+ // Create orphans.
+ donorColl.insert([{_id: 26}]);
+ assert.eq(null, donorColl.getDB().getLastError());
+ assert.eq(22, donorColl.count());
+ recipientColl.insert([{_id: -1}]);
+ assert.eq(null, recipientColl.getDB().getLastError());
+ assert.eq(22, recipientColl.count());
+
+    // The chunk has now been committed to the recipient, so cleanupOrphaned removes the migrated
+    // [0, 20) documents plus the orphan from the donor, and only the orphan from the recipient.
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(10, donorColl.count());
+ cleanupOrphaned(recipient, ns, 2);
+ assert.eq(21, recipientColl.count());
+
+ // Let migration thread complete.
+ unpauseMigrateAtStep(recipient, migrateStepNames.done);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ joinMoveChunk();
+
+ // Donor has finished post-move delete.
+ cleanupOrphaned(donor, ns, 2); // this is necessary for the count to not be 11
+ assert.eq(10, donorColl.count());
+ assert.eq(21, recipientColl.count());
+ assert.eq(31, coll.count());
+
+ st.stop();
})();
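
The test above drives a chunk migration by hand with the failpoint helpers from
jstests/libs/chunk_manipulation_util.js and jstests/libs/cleanup_orphaned_util.js. As a reference,
here is a condensed sketch of that orchestration pattern; it assumes the same helper files are
loaded and that st, staticMongod, coll, ns, donor, recipient and a destination shard id toShard
are already set up as in the test.

load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');

// Pause the donor before it starts the transfer and the recipient right after cloning.
pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
pauseMigrateAtStep(recipient, migrateStepNames.cloned);

// Kick off the migration from a parallel shell so this shell stays free to poke at the shards.
var joinMoveChunk =
    moveChunkParallel(staticMongod, st.s0.host, {_id: 0}, null, coll.getFullName(), toShard);

// Wait for both sides to hit their failpoints, then exercise cleanupOrphaned mid-migration.
waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
waitForMigrateStep(recipient, migrateStepNames.cloned);
cleanupOrphaned(donor, ns, 2);      // expects two cleanup iterations
cleanupOrphaned(recipient, ns, 2);

// Release the failpoints and let the migration run to completion.
unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
joinMoveChunk();
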
diff --git a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
index d4d523bbd06..58ea9e806fd 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_during_movechunk_hashed.js
@@ -1,129 +1,124 @@
//
//
// Tests cleanupOrphaned concurrent with moveChunk with a hashed shard key.
-// Inserts orphan documents to the donor and recipient shards during the moveChunk and
+// Inserts orphan documents into the donor and recipient shards during the moveChunk and
// verifies that cleanupOrphaned removes orphans.
//
load('./jstests/libs/chunk_manipulation_util.js');
load('./jstests/libs/cleanup_orphaned_util.js');
-(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-var st = new ShardingTest({ shards: 2, other: { separateConfig: true } });
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = 'foo',
- ns = dbName + '.bar',
- coll = mongos.getCollection(ns);
-
-assert.commandWorked( admin.runCommand({enableSharding: dbName}) );
-printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
-assert.commandWorked( admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}) );
-
-// Makes four chunks by default, two on each shard.
-var chunks = st.config.chunks.find().sort({min: 1}).toArray();
-assert.eq(4, chunks.length);
-
-var chunkWithDoc = chunks[1];
-print('Trying to make doc that hashes to this chunk: '
- + tojson(chunkWithDoc));
-
-var found = false;
-for (var i = 0; i < 10000; i++) {
- var doc = {key: ObjectId()},
- hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
-
- print('doc.key ' + doc.key + ' hashes to ' + hash);
-
- if (mongos.getCollection('config.chunks').findOne({
- _id: chunkWithDoc._id,
- 'min.key': {$lte: hash},
- 'max.key': {$gt: hash}
- })) {
- found = true;
- break;
- }
-}
+(function() {
+ "use strict";
-assert(found, "Couldn't make doc that belongs to chunk 1.");
-print('Doc: ' + tojson(doc));
-coll.insert(doc);
-assert.eq(null, coll.getDB().getLastError());
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+ var st = new ShardingTest({shards: 2, other: {separateConfig: true}});
-//
-// Start a moveChunk in the background from shard 0 to shard 1. Pause it at
-// some points in the donor's and recipient's work flows, and test
-// cleanupOrphaned.
-//
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = 'foo',
+ ns = dbName + '.bar', coll = mongos.getCollection(ns);
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {key: 'hashed'}}));
+
+ // Makes four chunks by default, two on each shard.
+ var chunks = st.config.chunks.find().sort({min: 1}).toArray();
+ assert.eq(4, chunks.length);
+
+ var chunkWithDoc = chunks[1];
+ print('Trying to make doc that hashes to this chunk: ' + tojson(chunkWithDoc));
+
+ var found = false;
+ for (var i = 0; i < 10000; i++) {
+ var doc =
+ {
+ key: ObjectId()
+ },
+ hash = mongos.adminCommand({_hashBSONElement: doc.key}).out;
+
+ print('doc.key ' + doc.key + ' hashes to ' + hash);
+
+ if (mongos.getCollection('config.chunks')
+ .findOne(
+ {_id: chunkWithDoc._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
+ found = true;
+ break;
+ }
+ }
+
+ assert(found, "Couldn't make doc that belongs to chunk 1.");
+ print('Doc: ' + tojson(doc));
+ coll.insert(doc);
+ assert.eq(null, coll.getDB().getLastError());
+
+ //
+ // Start a moveChunk in the background from shard 0 to shard 1. Pause it at
+ // some points in the donor's and recipient's work flows, and test
+ // cleanupOrphaned.
+ //
+
+ var donor, recip;
+ if (chunkWithDoc.shard == st.shard0.shardName) {
+ donor = st.shard0;
+ recip = st.shard1;
+ } else {
+ recip = st.shard0;
+ donor = st.shard1;
+ }
-var donor, recip;
-if (chunkWithDoc.shard == st.shard0.shardName) {
- donor = st.shard0;
- recip = st.shard1;
-} else {
- recip = st.shard0;
- donor = st.shard1;
-}
-
-jsTest.log('setting failpoint startedMoveChunk');
-pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-pauseMigrateAtStep(recip, migrateStepNames.cloned);
-
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- null,
- [chunkWithDoc.min, chunkWithDoc.max], // bounds
- coll.getFullName(),
- recip.shardName);
-
-waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
-waitForMigrateStep(recip, migrateStepNames.cloned);
-proceedToMigrateStep(recip, migrateStepNames.catchup);
-// recipient has run _recvChunkStart and begun its migration thread;
-// 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
-
-var donorColl = donor.getCollection(ns),
- recipColl = recip.getCollection(ns);
-
-assert.eq(1, donorColl.count());
-assert.eq(1, recipColl.count());
-
-// cleanupOrphaned should go through two iterations, since the default chunk
-// setup leaves two unowned ranges on each shard.
-cleanupOrphaned(donor, ns, 2);
-cleanupOrphaned(recip, ns, 2);
-assert.eq(1, donorColl.count());
-assert.eq(1, recipColl.count());
-
-// recip has been waiting for donor to call _recvChunkCommit.
-pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
-proceedToMigrateStep(recip, migrateStepNames.steady);
-proceedToMigrateStep(recip, migrateStepNames.done);
-
-// cleanupOrphaned removes migrated data from donor. The donor would
-// otherwise clean them up itself, in the post-move delete phase.
-cleanupOrphaned(donor, ns, 2);
-assert.eq(0, donorColl.count());
-cleanupOrphaned(recip, ns, 2);
-assert.eq(1, recipColl.count());
-
-// Let migration thread complete.
-unpauseMigrateAtStep(recip, migrateStepNames.done);
-unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
-joinMoveChunk();
-
-// donor has finished post-move delete.
-assert.eq(0, donorColl.count());
-assert.eq(1, recipColl.count());
-assert.eq(1, coll.count());
-
-st.stop();
+ jsTest.log('setting failpoint startedMoveChunk');
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ pauseMigrateAtStep(recip, migrateStepNames.cloned);
+
+ var joinMoveChunk = moveChunkParallel(staticMongod,
+ st.s0.host,
+ null,
+ [chunkWithDoc.min, chunkWithDoc.max], // bounds
+ coll.getFullName(),
+ recip.shardName);
+
+ waitForMoveChunkStep(donor, moveChunkStepNames.startedMoveChunk);
+ waitForMigrateStep(recip, migrateStepNames.cloned);
+ proceedToMigrateStep(recip, migrateStepNames.catchup);
+ // recipient has run _recvChunkStart and begun its migration thread;
+ // 'doc' has been cloned and chunkWithDoc is noted as 'pending' on recipient.
+
+ var donorColl = donor.getCollection(ns), recipColl = recip.getCollection(ns);
+
+ assert.eq(1, donorColl.count());
+ assert.eq(1, recipColl.count());
+
+ // cleanupOrphaned should go through two iterations, since the default chunk
+ // setup leaves two unowned ranges on each shard.
+ cleanupOrphaned(donor, ns, 2);
+ cleanupOrphaned(recip, ns, 2);
+ assert.eq(1, donorColl.count());
+ assert.eq(1, recipColl.count());
+
+ // recip has been waiting for donor to call _recvChunkCommit.
+ pauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.startedMoveChunk);
+ proceedToMigrateStep(recip, migrateStepNames.steady);
+ proceedToMigrateStep(recip, migrateStepNames.done);
+
+ // cleanupOrphaned removes migrated data from donor. The donor would
+ // otherwise clean them up itself, in the post-move delete phase.
+ cleanupOrphaned(donor, ns, 2);
+ assert.eq(0, donorColl.count());
+ cleanupOrphaned(recip, ns, 2);
+ assert.eq(1, recipColl.count());
+
+ // Let migration thread complete.
+ unpauseMigrateAtStep(recip, migrateStepNames.done);
+ unpauseMoveChunkAtStep(donor, moveChunkStepNames.committed);
+ joinMoveChunk();
+
+ // donor has finished post-move delete.
+ assert.eq(0, donorColl.count());
+ assert.eq(1, recipColl.count());
+ assert.eq(1, coll.count());
+
+ st.stop();
})();
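
With a hashed shard key there is no direct way to construct a document for a given chunk, so the
test above probes for one with the _hashBSONElement command. A minimal sketch of that probe,
assuming a mongos connection mongos, the sharded collection coll, and a target chunk document
targetChunk taken from config.chunks:

// Generate candidate keys until one hashes into the target chunk's [min, max) range.
var doc = null;
for (var i = 0; i < 10000; i++) {
    var candidate = {key: ObjectId()};
    var hash = mongos.adminCommand({_hashBSONElement: candidate.key}).out;
    if (mongos.getCollection('config.chunks').findOne(
            {_id: targetChunk._id, 'min.key': {$lte: hash}, 'max.key': {$gt: hash}})) {
        doc = candidate;
        break;
    }
}
assert.neq(null, doc, "no generated key hashed into the target chunk");
assert.writeOK(coll.insert(doc));
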
diff --git a/jstests/sharding/cleanup_orphaned_cmd_hashed.js b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
index 00294087885..48a08cd43f6 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_hashed.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_hashed.js
@@ -2,71 +2,78 @@
// Tests cleanup of orphaned data in hashed sharded coll via the orphaned data cleanup command
//
-(function() {
-"use strict";
-
-var st = new ShardingTest({ shards : 2, mongos : 1, other : { shardOptions : { verbose : 2 } } });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
-
-assert.commandWorked( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : coll + "", key : { _id : "hashed" } }) );
-
-// Create two orphaned data holes, one bounded by min or max on each shard
-
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(-100) } }) );
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(-50) } }) );
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(50) } }) );
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { _id : NumberLong(100) } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "", bounds : [{ _id : NumberLong(-100) },
- { _id : NumberLong(-50) }],
- to : shards[1]._id,
- _waitForDelete : true }) );
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "", bounds : [{ _id : NumberLong(50) },
- { _id : NumberLong(100) }],
- to : shards[0]._id,
- _waitForDelete : true }) );
-st.printShardingStatus();
-
-jsTest.log( "Inserting some docs on each shard, so 1/2 will be orphaned..." );
-
-for ( var s = 0; s < 2; s++ ) {
- var shardColl = ( s == 0 ? st.shard0 : st.shard1 ).getCollection( coll + "" );
- var bulk = shardColl.initializeUnorderedBulkOp();
- for ( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
- assert.writeOK(bulk.execute());
-}
-
-assert.eq( 200, st.shard0.getCollection( coll + "" ).find().itcount() +
- st.shard1.getCollection( coll + "" ).find().itcount() );
-assert.eq( 100, coll.find().itcount() );
-
-jsTest.log( "Cleaning up orphaned data in hashed coll..." );
-
-for ( var s = 0; s < 2; s++ ) {
- var shardAdmin = ( s == 0 ? st.shard0 : st.shard1 ).getDB( "admin" );
-
- var result = shardAdmin.runCommand({ cleanupOrphaned : coll + "" });
- while ( result.ok && result.stoppedAtKey ) {
- printjson( result );
- result = shardAdmin.runCommand({ cleanupOrphaned : coll + "",
- startingFromKey : result.stoppedAtKey });
+(function() {
+ "use strict";
+
+ var st = new ShardingTest({shards: 2, mongos: 1, other: {shardOptions: {verbose: 2}}});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
+
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}));
+
+ // Create two orphaned data holes, one bounded by min or max on each shard
+
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-100)}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(-50)}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(50)}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: NumberLong(100)}}));
+ assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(-100)}, {_id: NumberLong(-50)}],
+ to: shards[1]._id,
+ _waitForDelete: true
+ }));
+ assert.commandWorked(admin.runCommand({
+ moveChunk: coll + "",
+ bounds: [{_id: NumberLong(50)}, {_id: NumberLong(100)}],
+ to: shards[0]._id,
+ _waitForDelete: true
+ }));
+ st.printShardingStatus();
+
+ jsTest.log("Inserting some docs on each shard, so 1/2 will be orphaned...");
+
+ for (var s = 0; s < 2; s++) {
+ var shardColl = (s == 0 ? st.shard0 : st.shard1).getCollection(coll + "");
+ var bulk = shardColl.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
}
-
- printjson( result );
- assert( result.ok );
-}
-assert.eq( 100, st.shard0.getCollection( coll + "" ).find().itcount() +
- st.shard1.getCollection( coll + "" ).find().itcount() );
-assert.eq( 100, coll.find().itcount() );
+ assert.eq(200,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+ assert.eq(100, coll.find().itcount());
-jsTest.log( "DONE!" );
+ jsTest.log("Cleaning up orphaned data in hashed coll...");
-st.stop();
+ for (var s = 0; s < 2; s++) {
+ var shardAdmin = (s == 0 ? st.shard0 : st.shard1).getDB("admin");
+
+ var result = shardAdmin.runCommand({cleanupOrphaned: coll + ""});
+ while (result.ok && result.stoppedAtKey) {
+ printjson(result);
+ result = shardAdmin.runCommand(
+ {cleanupOrphaned: coll + "", startingFromKey: result.stoppedAtKey});
+ }
+
+ printjson(result);
+ assert(result.ok);
+ }
+
+ assert.eq(100,
+ st.shard0.getCollection(coll + "").find().itcount() +
+ st.shard1.getCollection(coll + "").find().itcount());
+ assert.eq(100, coll.find().itcount());
+
+ jsTest.log("DONE!");
+
+ st.stop();
})();
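
The cleanup loop in this test is the general pattern for draining cleanupOrphaned on one shard:
reissue the command with the returned stoppedAtKey until nothing more comes back. As a standalone
sketch, assuming a direct connection to a shard shardConn and a full namespace string ns:

// Scan the whole shard key space, one range at a time, deleting any orphaned documents.
var shardAdmin = shardConn.getDB("admin");
var result = shardAdmin.runCommand({cleanupOrphaned: ns});
while (result.ok && result.stoppedAtKey) {
    printjson(result);
    result = shardAdmin.runCommand({cleanupOrphaned: ns, startingFromKey: result.stoppedAtKey});
}
assert.commandWorked(result);
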
diff --git a/jstests/sharding/cleanup_orphaned_cmd_prereload.js b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
index 3dfc68ca9a3..7155baea970 100644
--- a/jstests/sharding/cleanup_orphaned_cmd_prereload.js
+++ b/jstests/sharding/cleanup_orphaned_cmd_prereload.js
@@ -2,94 +2,90 @@
// Tests failed cleanup of orphaned data when we have pending chunks
//
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-jsTest.log( "Moving some chunks to shard1..." );
+jsTest.log("Moving some chunks to shard1...");
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 1 } }).ok );
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 1}}).ok);
-assert( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id,
- _waitForDelete : true }).ok );
-assert( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 1 },
- to : shards[1]._id,
- _waitForDelete : true }).ok );
+assert(admin.runCommand(
+ {moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
+assert(admin.runCommand(
+ {moveChunk: coll + "", find: {_id: 1}, to: shards[1]._id, _waitForDelete: true})
+ .ok);
-var metadata = st.shard1.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard1.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
+printjson(metadata);
-assert.eq( metadata.pending[0][0]._id, 1 );
-assert.eq( metadata.pending[0][1]._id, MaxKey );
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
-jsTest.log( "Ensuring we won't remove orphaned data in pending chunk..." );
+jsTest.log("Ensuring we won't remove orphaned data in pending chunk...");
-assert( !st.shard1.getDB( "admin" )
- .runCommand({ cleanupOrphaned : coll + "", startingFromKey : { _id : 1 } }).stoppedAtKey );
+assert(!st.shard1.getDB("admin")
+ .runCommand({cleanupOrphaned: coll + "", startingFromKey: {_id: 1}})
+ .stoppedAtKey);
-jsTest.log( "Moving some chunks back to shard0 after empty..." );
+jsTest.log("Moving some chunks back to shard0 after empty...");
-admin.runCommand({ moveChunk : coll + "",
- find : { _id : -1 },
- to : shards[1]._id,
- _waitForDelete : true });
+admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: shards[1]._id, _waitForDelete: true});
-var metadata = st.shard0.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
+printjson(metadata);
-assert.eq( metadata.shardVersion.t, 0 );
-assert.neq( metadata.collVersion.t, 0 );
-assert.eq( metadata.pending.length, 0 );
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending.length, 0);
-assert( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 1 },
- to : shards[0]._id,
- _waitForDelete : true }).ok );
+assert(admin.runCommand(
+ {moveChunk: coll + "", find: {_id: 1}, to: shards[0]._id, _waitForDelete: true})
+ .ok);
-var metadata = st.shard0.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
-assert.eq( metadata.shardVersion.t, 0 );
-assert.neq( metadata.collVersion.t, 0 );
-assert.eq( metadata.pending[0][0]._id, 1 );
-assert.eq( metadata.pending[0][1]._id, MaxKey );
+printjson(metadata);
+assert.eq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.pending[0][0]._id, 1);
+assert.eq(metadata.pending[0][1]._id, MaxKey);
-jsTest.log( "Ensuring again we won't remove orphaned data in pending chunk..." );
+jsTest.log("Ensuring again we won't remove orphaned data in pending chunk...");
-assert( !st.shard0.getDB( "admin" )
- .runCommand({ cleanupOrphaned : coll + "", startingFromKey : { _id : 1 } }).stoppedAtKey );
+assert(!st.shard0.getDB("admin")
+ .runCommand({cleanupOrphaned: coll + "", startingFromKey: {_id: 1}})
+ .stoppedAtKey);
-jsTest.log( "Checking that pending chunk is promoted on reload..." );
+jsTest.log("Checking that pending chunk is promoted on reload...");
-assert.eq( null, coll.findOne({ _id : 1 }) );
+assert.eq(null, coll.findOne({_id: 1}));
-var metadata = st.shard0.getDB( "admin" )
- .runCommand({ getShardVersion : coll + "", fullMetadata : true }).metadata;
+var metadata =
+ st.shard0.getDB("admin").runCommand({getShardVersion: coll + "", fullMetadata: true}).metadata;
-printjson( metadata );
-assert.neq( metadata.shardVersion.t, 0 );
-assert.neq( metadata.collVersion.t, 0 );
-assert.eq( metadata.chunks[0][0]._id, 1 );
-assert.eq( metadata.chunks[0][1]._id, MaxKey );
+printjson(metadata);
+assert.neq(metadata.shardVersion.t, 0);
+assert.neq(metadata.collVersion.t, 0);
+assert.eq(metadata.chunks[0][0]._id, 1);
+assert.eq(metadata.chunks[0][1]._id, MaxKey);
st.printShardingStatus();
-jsTest.log( "DONE!" );
+jsTest.log("DONE!");
st.stop();
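
The pending-chunk assertions above come from the shard's cached metadata, which these tests
inspect with getShardVersion. A small sketch of that check, assuming a direct shard connection
shardConn and a full namespace string ns:

// Ask the shard for its cached collection metadata, including pending chunk ranges.
var metadata =
    shardConn.getDB("admin").runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
printjson(metadata);

// Each entry in metadata.pending is a [min, max] pair describing a chunk that is being
// received; cleanupOrphaned must not delete documents that fall inside such a range.
metadata.pending.forEach(function(range) {
    printjson({pendingMin: range[0], pendingMax: range[1]});
});
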
diff --git a/jstests/sharding/coll_epoch_test0.js b/jstests/sharding/coll_epoch_test0.js
index d294a7e0998..0ec0a5d3201 100644
--- a/jstests/sharding/coll_epoch_test0.js
+++ b/jstests/sharding/coll_epoch_test0.js
@@ -1,45 +1,51 @@
// Tests whether a split and a migrate in a sharded cluster preserve the epoch
-var st = new ShardingTest( { shards : 2, mongos : 1 } );
+var st = new ShardingTest({shards: 2, mongos: 1});
// Balancer is by default stopped, thus it will not interfere
-var config = st.s.getDB( "config" );
-var admin = st.s.getDB( "admin" );
-var coll = st.s.getCollection( "foo.bar" );
+var config = st.s.getDB("config");
+var admin = st.s.getDB("admin");
+var coll = st.s.getCollection("foo.bar");
// First enable sharding
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-var primary = config.databases.find({ _id : coll.getDB() + "" }).primary;
+var primary = config.databases.find({_id: coll.getDB() + ""}).primary;
var notPrimary = null;
-config.shards.find().forEach( function( doc ){ if( doc._id != primary ) notPrimary = doc._id; } );
+config.shards.find().forEach(function(doc) {
+ if (doc._id != primary)
+ notPrimary = doc._id;
+});
var createdEpoch = null;
-var checkEpochs = function(){
- config.chunks.find({ ns : coll + "" }).forEach( function( chunk ){
-
- // Make sure the epochs exist, are non-zero, and are consistent
- assert( chunk.lastmodEpoch );
- print( chunk.lastmodEpoch + "" );
- assert.neq( chunk.lastmodEpoch + "", "000000000000000000000000" );
- if( createdEpoch == null ) createdEpoch = chunk.lastmodEpoch;
- else assert.eq( createdEpoch, chunk.lastmodEpoch );
-
- });
-};
+var checkEpochs = function() {
+ config.chunks.find({ns: coll + ""})
+ .forEach(function(chunk) {
+
+ // Make sure the epochs exist, are non-zero, and are consistent
+ assert(chunk.lastmodEpoch);
+ print(chunk.lastmodEpoch + "");
+ assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
+ if (createdEpoch == null)
+ createdEpoch = chunk.lastmodEpoch;
+ else
+ assert.eq(createdEpoch, chunk.lastmodEpoch);
+
+ });
+};
checkEpochs();
// Now do a split
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
+printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
// Check all the chunks for epochs
checkEpochs();
// Now do a migrate
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : notPrimary }) );
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: notPrimary}));
// Check all the chunks for epochs
checkEpochs();
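
The checkEpochs helper above reduces to one invariant: every chunk document of the namespace
carries the same non-zero lastmodEpoch before and after splits and migrations. A compact
restatement of that check, assuming the mongos config database config and a full namespace
string ns:

// All chunks of a sharded collection must share a single, non-zero epoch.
var epoch = null;
config.chunks.find({ns: ns}).forEach(function(chunk) {
    assert(chunk.lastmodEpoch, "chunk has no lastmodEpoch");
    assert.neq(chunk.lastmodEpoch + "", "000000000000000000000000");
    if (epoch == null)
        epoch = chunk.lastmodEpoch;
    else
        assert.eq(epoch + "", chunk.lastmodEpoch + "");
});
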
diff --git a/jstests/sharding/coll_epoch_test1.js b/jstests/sharding/coll_epoch_test1.js
index fa24a035da7..28962732c25 100644
--- a/jstests/sharding/coll_epoch_test1.js
+++ b/jstests/sharding/coll_epoch_test1.js
@@ -1,6 +1,7 @@
-// Tests various cases of dropping and recreating collections in the same namespace with multiple mongoses
+// Tests various cases of dropping and recreating collections in the same namespace with multiple
+// mongoses
-var st = new ShardingTest({ shards : 3, mongos : 3, verbose : 1 });
+var st = new ShardingTest({shards: 3, mongos: 3, verbose: 1});
// Balancer is by default stopped, thus it will not interfere
// Use separate mongoses for admin, inserting data, and validating results, so no
@@ -8,15 +9,15 @@ var st = new ShardingTest({ shards : 3, mongos : 3, verbose : 1 });
var insertMongos = st.s2;
var staleMongos = st.s1;
-var config = st.s.getDB( "config" );
-var admin = st.s.getDB( "admin" );
-var coll = st.s.getCollection( "foo.bar" );
+var config = st.s.getDB("config");
+var admin = st.s.getDB("admin");
+var coll = st.s.getCollection("foo.bar");
-insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true });
+insertMongos.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
var shards = {};
-config.shards.find().forEach( function( doc ){
- shards[ doc._id ] = new Mongo( doc.host );
+config.shards.find().forEach(function(doc) {
+ shards[doc._id] = new Mongo(doc.host);
});
//
@@ -24,53 +25,54 @@ config.shards.find().forEach( function( doc ){
// in the background
//
-jsTest.log( "Enabling sharding for the first time..." );
+jsTest.log("Enabling sharding for the first time...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-var bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) {
- bulk.insert({ _id : i, test : "a" });
+var bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({_id: i, test: "a"});
}
-assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "a" }).itcount() );
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "a"}).itcount());
coll.drop();
//
-// Test that inserts and queries go to the correct shard even when the collection has been
+// Test that inserts and queries go to the correct shard even when the collection has been
// re-sharded in the background
//
-jsTest.log( "Re-enabling sharding with a different key..." );
+jsTest.log("Re-enabling sharding with a different key...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-coll.ensureIndex({ notId : 1 });
-admin.runCommand({ shardCollection : coll + "", key : { notId : 1 } });
+coll.ensureIndex({notId: 1});
+admin.runCommand({shardCollection: coll + "", key: {notId: 1}});
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) {
- bulk.insert({ notId : i, test : "b" });
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++) {
+ bulk.insert({notId: i, test: "b"});
}
-assert.writeOK( bulk.execute() );
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "b" }).itcount() );
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a" ] } }).itcount() );
+assert.writeOK(bulk.execute());
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "b"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a"]}}).itcount());
coll.drop();
//
-// Test that inserts and queries go to the correct shard even when the collection has been
+// Test that inserts and queries go to the correct shard even when the collection has been
// unsharded and moved to a different primary
//
-jsTest.log( "Re-creating unsharded collection from a sharded collection on different primary..." );
+jsTest.log("Re-creating unsharded collection from a sharded collection on different primary...");
-var getOtherShard = function( shard ){
- for( id in shards ){
- if( id != shard ) return id;
+var getOtherShard = function(shard) {
+ for (id in shards) {
+ if (id != shard)
+ return id;
}
};
@@ -81,40 +83,42 @@ if (st.configRS) {
// the most recent config data.
st.configRS.awaitLastOpCommitted();
}
-jsTest.log( "moved primary..." );
+jsTest.log("moved primary...");
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ )
- bulk.insert({ test : "c" });
-assert.writeOK( bulk.execute() );
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++)
+ bulk.insert({test: "c"});
+assert.writeOK(bulk.execute());
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "c" }).itcount() );
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b" ] } }).itcount() );
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "c"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b"]}}).itcount());
coll.drop();
//
-// Test that inserts and queries go to correct shard even when the collection has been unsharded,
+// Test that inserts and queries go to correct shard even when the collection has been unsharded,
// resharded, and moved to a different primary
//
-jsTest.log( "Re-creating sharded collection with different primary..." );
+jsTest.log("Re-creating sharded collection with different primary...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
-admin.runCommand({ movePrimary : coll.getDB() + "",
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) });
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({enableSharding: coll.getDB() + ""});
+admin.runCommand({
+ movePrimary: coll.getDB() + "",
+ to: getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary)
+});
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-bulk = insertMongos.getCollection( coll + "" ).initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ )
- bulk.insert({ test : "d" });
-assert.writeOK( bulk.execute() );
+bulk = insertMongos.getCollection(coll + "").initializeUnorderedBulkOp();
+for (var i = 0; i < 100; i++)
+ bulk.insert({test: "d"});
+assert.writeOK(bulk.execute());
-assert.eq( 100, staleMongos.getCollection( coll + "" ).find({ test : "d" }).itcount() );
-assert.eq( 0, staleMongos.getCollection( coll + "" ).find({ test : { $in : [ "a", "b", "c" ] } }).itcount() );
+assert.eq(100, staleMongos.getCollection(coll + "").find({test: "d"}).itcount());
+assert.eq(0, staleMongos.getCollection(coll + "").find({test: {$in: ["a", "b", "c"]}}).itcount());
coll.drop();
-jsTest.log( "Done!" );
+jsTest.log("Done!");
st.stop();
diff --git a/jstests/sharding/coll_epoch_test2.js b/jstests/sharding/coll_epoch_test2.js
index 09109ebce43..dbed610cad6 100644
--- a/jstests/sharding/coll_epoch_test2.js
+++ b/jstests/sharding/coll_epoch_test2.js
@@ -1,10 +1,10 @@
// Tests that resharding a collection is detected correctly by all operation types
-//
+//
// The idea here is that a collection may be resharded / unsharded at any point, and any type of
 // operation on a mongos may be active when it happens. All operations should handle this gracefully.
//
-var st = new ShardingTest({ shards : 2, mongos : 5, verbose : 1 });
+var st = new ShardingTest({shards: 2, mongos: 5, verbose: 1});
// Balancer is by default stopped, thus it will not interfere
// Use separate mongos for reading, updating, inserting, removing data
@@ -13,40 +13,44 @@ var updateMongos = st.s2;
var insertMongos = st.s3;
var removeMongos = st.s4;
-var config = st.s.getDB( "config" );
-var admin = st.s.getDB( "admin" );
-var coll = st.s.getCollection( "foo.bar" );
+var config = st.s.getDB("config");
+var admin = st.s.getDB("admin");
+var coll = st.s.getCollection("foo.bar");
-insertMongos.getDB( "admin" ).runCommand({ setParameter : 1, traceExceptions : true });
+insertMongos.getDB("admin").runCommand({setParameter: 1, traceExceptions: true});
var shards = {};
-config.shards.find().forEach( function( doc ){
- shards[ doc._id ] = new Mongo( doc.host );
+config.shards.find().forEach(function(doc) {
+ shards[doc._id] = new Mongo(doc.host);
});
//
// Set up a sharded collection
//
-jsTest.log( "Enabling sharding for the first time..." );
+jsTest.log("Enabling sharding for the first time...");
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
-
-assert.writeOK(coll.insert({ hello : "world" }));
-
-jsTest.log( "Sharding collection across multiple shards..." );
-
-var getOtherShard = function( shard ){
- for( id in shards ){
- if( id != shard ) return id;
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
+
+assert.writeOK(coll.insert({hello: "world"}));
+
+jsTest.log("Sharding collection across multiple shards...");
+
+var getOtherShard = function(shard) {
+ for (id in shards) {
+ if (id != shard)
+ return id;
}
};
-
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 0 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 },
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) );
+
+printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+printjson(admin.runCommand({
+ moveChunk: coll + "",
+ find: {_id: 0},
+ to: getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary)
+}));
st.printShardingStatus();
@@ -54,11 +58,11 @@ st.printShardingStatus();
// Force all mongoses to load the current status of the cluster
//
-jsTest.log( "Loading this status in all mongoses..." );
+jsTest.log("Loading this status in all mongoses...");
-for( var i = 0; i < st._mongos.length; i++ ){
- printjson( st._mongos[i].getDB( "admin" ).runCommand({ flushRouterConfig : 1 }) );
- assert.neq( null, st._mongos[i].getCollection( coll + "" ).findOne() );
+for (var i = 0; i < st._mongos.length; i++) {
+ printjson(st._mongos[i].getDB("admin").runCommand({flushRouterConfig: 1}));
+ assert.neq(null, st._mongos[i].getCollection(coll + "").findOne());
}
//
@@ -66,57 +70,60 @@ for( var i = 0; i < st._mongos.length; i++ ){
// versions are the same, but the split is at a different point.
//
-jsTest.log( "Rebuilding sharded collection with different split..." );
+jsTest.log("Rebuilding sharded collection with different split...");
coll.drop();
-var droppedCollDoc = config.collections.findOne({ _id: coll.getFullName() });
+var droppedCollDoc = config.collections.findOne({_id: coll.getFullName()});
assert(droppedCollDoc != null);
assert.eq(true, droppedCollDoc.dropped);
assert(droppedCollDoc.lastmodEpoch != null);
assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000")),
"epoch not zero: " + droppedCollDoc.lastmodEpoch);
-admin.runCommand({ enableSharding : coll.getDB() + "" });
+admin.runCommand({enableSharding: coll.getDB() + ""});
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ) bulk.insert({ _id : i });
+for (var i = 0; i < 100; i++)
+ bulk.insert({_id: i});
assert.writeOK(bulk.execute());
-printjson( admin.runCommand({ split : coll + "", middle : { _id : 200 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 200 },
- to : getOtherShard( config.databases.findOne({ _id : coll.getDB() + "" }).primary ) }) );
+printjson(admin.runCommand({split: coll + "", middle: {_id: 200}}));
+printjson(admin.runCommand({
+ moveChunk: coll + "",
+ find: {_id: 200},
+ to: getOtherShard(config.databases.findOne({_id: coll.getDB() + ""}).primary)
+}));
//
// Make sure all operations on mongoses aren't tricked by the change
-//
-
-jsTest.log( "Checking other mongoses for detection of change..." );
+//
+
+jsTest.log("Checking other mongoses for detection of change...");
-jsTest.log( "Checking find..." );
+jsTest.log("Checking find...");
// Ensure that finding an element works when resharding
-assert.neq( null, readMongos.getCollection( coll + "" ).findOne({ _id : 1 }) );
+assert.neq(null, readMongos.getCollection(coll + "").findOne({_id: 1}));
-jsTest.log( "Checking update...");
+jsTest.log("Checking update...");
// Ensure that updating an element finds the right location
-assert.writeOK(updateMongos.getCollection( coll + "" ).update({ _id : 1 },
- { $set : { updated : true } }));
-assert.neq( null, coll.findOne({ updated : true }) );
+assert.writeOK(updateMongos.getCollection(coll + "").update({_id: 1}, {$set: {updated: true}}));
+assert.neq(null, coll.findOne({updated: true}));
-jsTest.log( "Checking insert..." );
+jsTest.log("Checking insert...");
// Ensure that inserting an element finds the right shard
-assert.writeOK(insertMongos.getCollection( coll + "" ).insert({ _id : 101 }));
-assert.neq( null, coll.findOne({ _id : 101 }) );
+assert.writeOK(insertMongos.getCollection(coll + "").insert({_id: 101}));
+assert.neq(null, coll.findOne({_id: 101}));
-jsTest.log( "Checking remove..." );
+jsTest.log("Checking remove...");
// Ensure that removing an element finds the right shard, verified by the mongos doing the sharding
-assert.writeOK(removeMongos.getCollection( coll + "" ).remove({ _id : 2 }));
-assert.eq( null, coll.findOne({ _id : 2 }) );
+assert.writeOK(removeMongos.getCollection(coll + "").remove({_id: 2}));
+assert.eq(null, coll.findOne({_id: 2}));
coll.drop();
-jsTest.log( "Done!" );
+jsTest.log("Done!");
st.stop();
diff --git a/jstests/sharding/conf_server_write_concern.js b/jstests/sharding/conf_server_write_concern.js
index 500061d4ca1..c4e08939548 100644
--- a/jstests/sharding/conf_server_write_concern.js
+++ b/jstests/sharding/conf_server_write_concern.js
@@ -1,24 +1,21 @@
/**
 * Test that write concern with the w parameter works as expected when writing directly to the config servers
*/
-function writeToConfigTest(){
+function writeToConfigTest() {
jsTestLog("Testing data writes to config server with write concern");
- var st = new ShardingTest({ shards: 2 });
- var confDB = st.s.getDB( 'config' );
+ var st = new ShardingTest({shards: 2});
+ var confDB = st.s.getDB('config');
- assert.writeOK(confDB.settings.update({ _id: 'balancer' },
- { $set: { stopped: true }},
- { writeConcern: { w: 'majority' }}));
+ assert.writeOK(confDB.settings.update(
+ {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 'majority'}}));
// w:1 should still work - it gets automatically upconverted to w:majority
- assert.writeOK(confDB.settings.update({ _id: 'balancer' },
- { $set: { stopped: true }},
- { writeConcern: { w: 1 }}));
+ assert.writeOK(confDB.settings.update(
+ {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 1}}));
// Write concerns other than w:1 and w:majority should fail.
- assert.writeError(confDB.settings.update({ _id: 'balancer' },
- { $set: { stopped: true }},
- { writeConcern: { w: 2 }}));
+ assert.writeError(confDB.settings.update(
+ {_id: 'balancer'}, {$set: {stopped: true}}, {writeConcern: {w: 2}}));
st.stop();
}
@@ -27,40 +24,43 @@ function writeToConfigTest(){
 * Test that a write concern with the w parameter does not cause an error when writes to mongos
 * trigger writes to the config servers (in this test, chunk splitting is used).
*/
-function configTest(){
+function configTest() {
jsTestLog("Testing metadata writes to config server with write concern");
- var st = new ShardingTest({ shards: 1, rs: true, other: { chunkSize: 1 }});
-
+ var st = new ShardingTest({shards: 1, rs: true, other: {chunkSize: 1}});
+
var mongos = st.s;
- var testDB = mongos.getDB( 'test' );
+ var testDB = mongos.getDB('test');
var coll = testDB.user;
-
- testDB.adminCommand({ enableSharding: testDB.getName() });
- testDB.adminCommand({ shardCollection: coll.getFullName(), key: { x: 1 }});
-
+
+ testDB.adminCommand({enableSharding: testDB.getName()});
+ testDB.adminCommand({shardCollection: coll.getFullName(), key: {x: 1}});
+
var chunkCount = function() {
- return mongos.getDB( 'config' ).chunks.find().count();
+ return mongos.getDB('config').chunks.find().count();
};
-
+
var initChunks = chunkCount();
var currChunks = initChunks;
var gleObj = null;
var x = 0;
- var largeStr = new Array(1024*128).toString();
+ var largeStr = new Array(1024 * 128).toString();
- assert.soon(function() {
- var bulk = coll.initializeUnorderedBulkOp();
- for (var i = 0; i < 100; i++) {
- bulk.insert({x: x++, largeStr: largeStr});
- }
- assert.writeOK(bulk.execute({w: 'majority', wtimeout: 60 * 1000}));
- currChunks = chunkCount();
- return currChunks > initChunks;
- }, function() { return "currChunks: " + currChunks + ", initChunks: " + initChunks; });
+ assert.soon(
+ function() {
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ bulk.insert({x: x++, largeStr: largeStr});
+ }
+ assert.writeOK(bulk.execute({w: 'majority', wtimeout: 60 * 1000}));
+ currChunks = chunkCount();
+ return currChunks > initChunks;
+ },
+ function() {
+ return "currChunks: " + currChunks + ", initChunks: " + initChunks;
+ });
st.stop();
}
writeToConfigTest();
configTest();
-
diff --git a/jstests/sharding/config_rs_change.js b/jstests/sharding/config_rs_change.js
index ac75751ee91..62e6a8f99e4 100644
--- a/jstests/sharding/config_rs_change.js
+++ b/jstests/sharding/config_rs_change.js
@@ -3,9 +3,7 @@
// of the config replset config during startup.
var configRS = new ReplSetTest({name: "configRS", nodes: 1, useHostName: true});
-configRS.startSet({ configsvr: '',
- journal: "",
- storageEngine: 'wiredTiger' });
+configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
configRS.initiate(replConfig);
diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js
index 9fce3421474..11d9a8e41aa 100644
--- a/jstests/sharding/config_rs_no_primary.js
+++ b/jstests/sharding/config_rs_no_primary.js
@@ -1,48 +1,54 @@
// Tests operation of the cluster when the config servers have no primary and thus the cluster
// metadata is in read-only mode.
(function() {
-"use strict";
-
-var st = new ShardingTest({shards: 1,
- other: {c0: {}, // Make sure 1st config server is primary
- c1: {rsConfig: {priority: 0}},
- c2: {rsConfig: {priority: 0}}}});
-
-assert.eq(st.config0, st.configRS.getPrimary());
-
-// Create the "test" database while the cluster metadata is still writeable.
-st.s.getDB('test').foo.insert({a:1});
-
-// Take down two of the config servers so the remaining one goes into SECONDARY state.
-st.configRS.stop(1);
-st.configRS.stop(2);
-st.configRS.awaitNoPrimary();
-
-jsTestLog("Starting a new mongos when the config servers have no primary which should work");
-var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
-assert.neq(null, mongos2);
-
-var testOps = function(mongos) {
- jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
- mongos);
- var initialCount = mongos.getDB('test').foo.count();
- assert.writeOK(mongos.getDB('test').foo.insert({a:1}));
- assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
-
- assert.throws(function() {mongos.getDB('config').shards.findOne();});
- mongos.setSlaveOk(true);
- var shardDoc = mongos.getDB('config').shards.findOne();
- mongos.setSlaveOk(false);
- assert.neq(null, shardDoc);
-
- jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
- assert.writeError(mongos.getDB("newDB").foo.insert({a:1}));
- assert.commandFailed(mongos.getDB('admin').runCommand({shardCollection: "test.foo",
- key: {a:1}}));
-};
-
-testOps(mongos2);
-testOps(st.s);
-
-st.stop();
+ "use strict";
+
+ var st = new ShardingTest({
+ shards: 1,
+ other: {
+ c0: {}, // Make sure 1st config server is primary
+ c1: {rsConfig: {priority: 0}},
+ c2: {rsConfig: {priority: 0}}
+ }
+ });
+
+ assert.eq(st.config0, st.configRS.getPrimary());
+
+ // Create the "test" database while the cluster metadata is still writeable.
+ st.s.getDB('test').foo.insert({a: 1});
+
+ // Take down two of the config servers so the remaining one goes into SECONDARY state.
+ st.configRS.stop(1);
+ st.configRS.stop(2);
+ st.configRS.awaitNoPrimary();
+
+ jsTestLog("Starting a new mongos when the config servers have no primary which should work");
+ var mongos2 = MongoRunner.runMongos({configdb: st.configRS.getURL()});
+ assert.neq(null, mongos2);
+
+ var testOps = function(mongos) {
+ jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " +
+ mongos);
+ var initialCount = mongos.getDB('test').foo.count();
+ assert.writeOK(mongos.getDB('test').foo.insert({a: 1}));
+ assert.eq(initialCount + 1, mongos.getDB('test').foo.count());
+
+ assert.throws(function() {
+ mongos.getDB('config').shards.findOne();
+ });
+ mongos.setSlaveOk(true);
+ var shardDoc = mongos.getDB('config').shards.findOne();
+ mongos.setSlaveOk(false);
+ assert.neq(null, shardDoc);
+
+ jsTestLog("Doing ops that require metadata writes and thus should fail against: " + mongos);
+ assert.writeError(mongos.getDB("newDB").foo.insert({a: 1}));
+ assert.commandFailed(
+ mongos.getDB('admin').runCommand({shardCollection: "test.foo", key: {a: 1}}));
+ };
+
+ testOps(mongos2);
+ testOps(st.s);
+
+ st.stop();
}()); \ No newline at end of file
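
With the config replica set left without a primary, cluster metadata is read-only, and the test
shows that even reads of config data through mongos must explicitly allow secondary reads. A
small sketch of that read pattern, assuming a mongos connection mongos:

// Without a config primary, a default (primary-targeted) read of config data throws...
assert.throws(function() {
    mongos.getDB('config').shards.findOne();
});

// ...but the same read succeeds once reads from secondaries are allowed.
mongos.setSlaveOk(true);
var shardDoc = mongos.getDB('config').shards.findOne();
mongos.setSlaveOk(false);
assert.neq(null, shardDoc);
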
diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js
index 8cbf8788ae7..872c20602a5 100644
--- a/jstests/sharding/conn_pool_stats.js
+++ b/jstests/sharding/conn_pool_stats.js
@@ -4,7 +4,7 @@
var cluster = new ShardingTest({shards: 2});
// Run the connPoolStats command
-stats = cluster.s.getDB("admin").runCommand({connPoolStats : 1});
+stats = cluster.s.getDB("admin").runCommand({connPoolStats: 1});
// Validate output
printjson(stats);
diff --git a/jstests/sharding/copydb_from_mongos.js b/jstests/sharding/copydb_from_mongos.js
index aa6ac16b465..66db42407ca 100644
--- a/jstests/sharding/copydb_from_mongos.js
+++ b/jstests/sharding/copydb_from_mongos.js
@@ -1,26 +1,22 @@
(function() {
-var st = new ShardingTest({ shards: 1 });
+ var st = new ShardingTest({shards: 1});
-var testDB = st.s.getDB('test');
-assert.writeOK(testDB.foo.insert({ a: 1 }));
+ var testDB = st.s.getDB('test');
+ assert.writeOK(testDB.foo.insert({a: 1}));
-var res = testDB.adminCommand({ copydb: 1,
- fromhost: st.s.host,
- fromdb: 'test',
- todb: 'test_copy' });
-assert.commandWorked(res);
+ var res =
+ testDB.adminCommand({copydb: 1, fromhost: st.s.host, fromdb: 'test', todb: 'test_copy'});
+ assert.commandWorked(res);
-var copy = st.s.getDB('test_copy');
-assert.eq(1, copy.foo.count());
-assert.eq(1, copy.foo.findOne().a);
+ var copy = st.s.getDB('test_copy');
+ assert.eq(1, copy.foo.count());
+ assert.eq(1, copy.foo.findOne().a);
-// Test invalid todb database name.
-assert.commandFailed(testDB.adminCommand({ copydb: 1,
- fromhost: st.s.host,
- fromdb: 'test_copy',
- todb: 'test/copy' }));
+ // Test invalid todb database name.
+ assert.commandFailed(testDB.adminCommand(
+ {copydb: 1, fromhost: st.s.host, fromdb: 'test_copy', todb: 'test/copy'}));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/count1.js b/jstests/sharding/count1.js
index a79d3ebfdd4..4686d317f6d 100644
--- a/jstests/sharding/count1.js
+++ b/jstests/sharding/count1.js
@@ -1,175 +1,181 @@
(function() {
-var s = new ShardingTest({ name: "count1", shards: 2 });
-var db = s.getDB( "test" );
-
-// ************** Test Set #1 *************
-// Basic counts on "bar" collections, not yet sharded
-
-db.bar.save( { n : 1 } );
-db.bar.save( { n : 2 } );
-db.bar.save( { n : 3 } );
-
-assert.eq( 3 , db.bar.find().count() , "bar 1" );
-assert.eq( 1 , db.bar.find( { n : 1 } ).count() , "bar 2" );
-
-//************** Test Set #2 *************
-// Basic counts on sharded "foo" collection.
-// 1. Create foo collection, insert 6 docs
-// 2. Divide into three chunks
-// 3. Test counts before chunk migrations
-// 4. Manually move chunks. Now each shard should have 3 docs.
-// 5. i. Test basic counts on foo
-// ii. Test counts with limit
-// iii. Test counts with skip
-// iv. Test counts with skip + limit
-// v. Test counts with skip + limit + sorting
-// 6. Insert 10 more docs. Further limit/skip testing with a find query
-// 7. test invalid queries/values
-
-// part 1
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
-
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
-
-assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
-
-db.foo.save( { _id : 1 , name : "eliot" } );
-db.foo.save( { _id : 2 , name : "sara" } );
-db.foo.save( { _id : 3 , name : "bob" } );
-db.foo.save( { _id : 4 , name : "joe" } );
-db.foo.save( { _id : 5 , name : "mark" } );
-db.foo.save( { _id : 6 , name : "allan" } );
-
-assert.eq( 6 , db.foo.find().count() , "basic count" );
-
-// part 2
-s.adminCommand({ split: "test.foo", middle: { name: "allan" }});
-s.adminCommand({ split: "test.foo", middle: { name: "sara" }});
-s.adminCommand({ split: "test.foo", middle: { name: "eliot" }});
-
-// MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
-
-s.printChunks();
-
-// part 3
-assert.eq( 6 , db.foo.find().count() , "basic count after split " );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "basic count after split sorted " );
-
-// part 4
-s.adminCommand( { movechunk : "test.foo" , find : { name : "eliot" } , to : secondary.getMongo().name , _waitForDelete : true } );
-
-assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
-assert.eq( 3 , secondary.foo.find().toArray().length , "secondary count" );
-assert.eq( 3 , primary.foo.find().sort( { name : 1 } ).toArray().length , "primary count sorted" );
-assert.eq( 3 , secondary.foo.find().sort( { name : 1 } ).toArray().length , "secondary count sorted" );
-
-// part 5
-// Some redundant tests, but better safe than sorry. These are fast tests, anyway.
-
-// i.
-assert.eq( 6 , db.foo.find().count() , "total count after move" );
-assert.eq( 6 , db.foo.find().toArray().length , "total count after move" );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count() sorted" );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count() after move" );
-
-// ii.
-assert.eq( 2 , db.foo.find().limit(2).count(true) );
-assert.eq( 2 , db.foo.find().limit(-2).count(true) );
-assert.eq( 6 , db.foo.find().limit(100).count(true) );
-assert.eq( 6 , db.foo.find().limit(-100).count(true) );
-assert.eq( 6 , db.foo.find().limit(0).count(true) );
-
-// iii.
-assert.eq( 6 , db.foo.find().skip(0).count(true) );
-assert.eq( 5 , db.foo.find().skip(1).count(true) );
-assert.eq( 4 , db.foo.find().skip(2).count(true) );
-assert.eq( 3 , db.foo.find().skip(3).count(true) );
-assert.eq( 2 , db.foo.find().skip(4).count(true) );
-assert.eq( 1 , db.foo.find().skip(5).count(true) );
-assert.eq( 0 , db.foo.find().skip(6).count(true) );
-assert.eq( 0 , db.foo.find().skip(7).count(true) );
-
-// iv.
-assert.eq( 2 , db.foo.find().limit(2).skip(1).count(true) );
-assert.eq( 2 , db.foo.find().limit(-2).skip(1).count(true) );
-assert.eq( 5 , db.foo.find().limit(100).skip(1).count(true) );
-assert.eq( 5 , db.foo.find().limit(-100).skip(1).count(true) );
-assert.eq( 5 , db.foo.find().limit(0).skip(1).count(true) );
-
-assert.eq( 0 , db.foo.find().limit(2).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(-2).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(100).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(-100).skip(10).count(true) );
-assert.eq( 0 , db.foo.find().limit(0).skip(10).count(true) );
-
-assert.eq( 2 , db.foo.find().limit(2).itcount() , "LS1" );
-assert.eq( 2 , db.foo.find().skip(2).limit(2).itcount() , "LS2" );
-assert.eq( 1 , db.foo.find().skip(5).limit(2).itcount() , "LS3" );
-assert.eq( 6 , db.foo.find().limit(2).count() , "LSC1" );
-assert.eq( 2 , db.foo.find().limit(2).size() , "LSC2" );
-assert.eq( 2 , db.foo.find().skip(2).limit(2).size() , "LSC3" );
-assert.eq( 1 , db.foo.find().skip(5).limit(2).size() , "LSC4" );
-assert.eq( 4 , db.foo.find().skip(1).limit(4).size() , "LSC5" );
-assert.eq( 5 , db.foo.find().skip(1).limit(6).size() , "LSC6" );
-
-// SERVER-3567 older negative limit tests
-assert.eq( 2 , db.foo.find().limit(2).itcount() , "N1" );
-assert.eq( 2 , db.foo.find().limit(-2).itcount() , "N2" );
-assert.eq( 2 , db.foo.find().skip(4).limit(2).itcount() , "N3" );
-assert.eq( 2 , db.foo.find().skip(4).limit(-2).itcount() , "N4" );
-
-// v.
-function nameString( c ){
- var s = "";
- while ( c.hasNext() ){
- var o = c.next();
- if ( s.length > 0 )
- s += ",";
- s += o.name;
+ var s = new ShardingTest({name: "count1", shards: 2});
+ var db = s.getDB("test");
+
+ // ************** Test Set #1 *************
+ // Basic counts on "bar" collections, not yet sharded
+
+ db.bar.save({n: 1});
+ db.bar.save({n: 2});
+ db.bar.save({n: 3});
+
+ assert.eq(3, db.bar.find().count(), "bar 1");
+ assert.eq(1, db.bar.find({n: 1}).count(), "bar 2");
+
+ //************** Test Set #2 *************
+ // Basic counts on sharded "foo" collection.
+ // 1. Create foo collection, insert 6 docs
+ // 2. Divide into three chunks
+ // 3. Test counts before chunk migrations
+ // 4. Manually move chunks. Now each shard should have 3 docs.
+ // 5. i. Test basic counts on foo
+ // ii. Test counts with limit
+ // iii. Test counts with skip
+ // iv. Test counts with skip + limit
+ // v. Test counts with skip + limit + sorting
+ // 6. Insert 10 more docs. Further limit/skip testing with a find query
+    // 7. Test invalid queries/values
+
+ // part 1
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
+
+ assert.eq(1, s.config.chunks.count(), "sanity check A");
+
+ db.foo.save({_id: 1, name: "eliot"});
+ db.foo.save({_id: 2, name: "sara"});
+ db.foo.save({_id: 3, name: "bob"});
+ db.foo.save({_id: 4, name: "joe"});
+ db.foo.save({_id: 5, name: "mark"});
+ db.foo.save({_id: 6, name: "allan"});
+
+ assert.eq(6, db.foo.find().count(), "basic count");
+
+ // part 2
+ s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+ s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+ s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+
+ // MINKEY->allan,bob->eliot,joe,mark->sara,MAXKEY
+
+ s.printChunks();
+
+ // part 3
+ assert.eq(6, db.foo.find().count(), "basic count after split ");
+ assert.eq(6, db.foo.find().sort({name: 1}).count(), "basic count after split sorted ");
+
+ // part 4
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "eliot"},
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ assert.eq(3, primary.foo.find().toArray().length, "primary count");
+ assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
+ assert.eq(3, primary.foo.find().sort({name: 1}).toArray().length, "primary count sorted");
+ assert.eq(3, secondary.foo.find().sort({name: 1}).toArray().length, "secondary count sorted");
+
+ // part 5
+ // Some redundant tests, but better safe than sorry. These are fast tests, anyway.
+
+ // i.
+ assert.eq(6, db.foo.find().count(), "total count after move");
+ assert.eq(6, db.foo.find().toArray().length, "total count after move");
+ assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count() sorted");
+ assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count() after move");
+
+ // ii.
+ assert.eq(2, db.foo.find().limit(2).count(true));
+ assert.eq(2, db.foo.find().limit(-2).count(true));
+ assert.eq(6, db.foo.find().limit(100).count(true));
+ assert.eq(6, db.foo.find().limit(-100).count(true));
+ assert.eq(6, db.foo.find().limit(0).count(true));
+
+ // iii.
+ assert.eq(6, db.foo.find().skip(0).count(true));
+ assert.eq(5, db.foo.find().skip(1).count(true));
+ assert.eq(4, db.foo.find().skip(2).count(true));
+ assert.eq(3, db.foo.find().skip(3).count(true));
+ assert.eq(2, db.foo.find().skip(4).count(true));
+ assert.eq(1, db.foo.find().skip(5).count(true));
+ assert.eq(0, db.foo.find().skip(6).count(true));
+ assert.eq(0, db.foo.find().skip(7).count(true));
+
+ // iv.
+ assert.eq(2, db.foo.find().limit(2).skip(1).count(true));
+ assert.eq(2, db.foo.find().limit(-2).skip(1).count(true));
+ assert.eq(5, db.foo.find().limit(100).skip(1).count(true));
+ assert.eq(5, db.foo.find().limit(-100).skip(1).count(true));
+ assert.eq(5, db.foo.find().limit(0).skip(1).count(true));
+
+ assert.eq(0, db.foo.find().limit(2).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(-2).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(100).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(-100).skip(10).count(true));
+ assert.eq(0, db.foo.find().limit(0).skip(10).count(true));
+
+ assert.eq(2, db.foo.find().limit(2).itcount(), "LS1");
+ assert.eq(2, db.foo.find().skip(2).limit(2).itcount(), "LS2");
+ assert.eq(1, db.foo.find().skip(5).limit(2).itcount(), "LS3");
+ assert.eq(6, db.foo.find().limit(2).count(), "LSC1");
+ assert.eq(2, db.foo.find().limit(2).size(), "LSC2");
+ assert.eq(2, db.foo.find().skip(2).limit(2).size(), "LSC3");
+ assert.eq(1, db.foo.find().skip(5).limit(2).size(), "LSC4");
+ assert.eq(4, db.foo.find().skip(1).limit(4).size(), "LSC5");
+ assert.eq(5, db.foo.find().skip(1).limit(6).size(), "LSC6");
+
+ // SERVER-3567 older negative limit tests
+ assert.eq(2, db.foo.find().limit(2).itcount(), "N1");
+ assert.eq(2, db.foo.find().limit(-2).itcount(), "N2");
+ assert.eq(2, db.foo.find().skip(4).limit(2).itcount(), "N3");
+ assert.eq(2, db.foo.find().skip(4).limit(-2).itcount(), "N4");
+
+ // v.
+ function nameString(c) {
+ var s = "";
+ while (c.hasNext()) {
+ var o = c.next();
+ if (s.length > 0)
+ s += ",";
+ s += o.name;
+ }
+ return s;
}
- return s;
-}
-assert.eq( "allan,bob,eliot,joe,mark,sara" , nameString( db.foo.find().sort( { name : 1 } ) ) , "sort 1" );
-assert.eq( "sara,mark,joe,eliot,bob,allan" , nameString( db.foo.find().sort( { name : -1 } ) ) , "sort 2" );
-
-assert.eq( "allan,bob" , nameString( db.foo.find().sort( { name : 1 } ).limit(2) ) , "LSD1" );
-assert.eq( "bob,eliot" , nameString( db.foo.find().sort( { name : 1 } ).skip(1).limit(2) ) , "LSD2" );
-assert.eq( "joe,mark" , nameString( db.foo.find().sort( { name : 1 } ).skip(3).limit(2) ) , "LSD3" );
-
-assert.eq( "eliot,sara" , nameString( db.foo.find().sort( { _id : 1 } ).limit(2) ) , "LSE1" );
-assert.eq( "sara,bob" , nameString( db.foo.find().sort( { _id : 1 } ).skip(1).limit(2) ) , "LSE2" );
-assert.eq( "joe,mark" , nameString( db.foo.find().sort( { _id : 1 } ).skip(3).limit(2) ) , "LSE3" );
-
-// part 6
-for ( i=0; i<10; i++ ){
- db.foo.save( { _id : 7 + i , name : "zzz" + i } );
-}
-
-assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).itcount() , "LSF1" );
-assert.eq( 10 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).itcount() , "LSF2" );
-assert.eq( 5 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).itcount() , "LSF3" );
-assert.eq( 3 , db.foo.find( { name : { $gt : "z" } } ).sort( { _id : 1 } ).skip(5).limit(3).itcount() , "LSF4" );
-
-// part 7
-// Make sure count command returns error for invalid queries
-var badCmdResult = db.runCommand({ count: 'foo', query: { $c: { $abc: 3 }}});
-assert( ! badCmdResult.ok , "invalid query syntax didn't return error" );
-assert( badCmdResult.errmsg.length > 0 , "no error msg for invalid query" );
-
-// Negative skip values should return error
-var negSkipResult = db.runCommand({ count: 'foo', skip : -2 });
-assert( ! negSkipResult.ok , "negative skip value shouldn't work" );
-assert( negSkipResult.errmsg.length > 0 , "no error msg for negative skip" );
-
-// Negative skip values with positive limit should return error
-var negSkipLimitResult = db.runCommand({ count: 'foo', skip : -2, limit : 1 });
-assert( ! negSkipLimitResult.ok , "negative skip value with limit shouldn't work" );
-assert( negSkipLimitResult.errmsg.length > 0 , "no error msg for negative skip" );
-
-s.stop();
+ assert.eq("allan,bob,eliot,joe,mark,sara", nameString(db.foo.find().sort({name: 1})), "sort 1");
+ assert.eq(
+ "sara,mark,joe,eliot,bob,allan", nameString(db.foo.find().sort({name: -1})), "sort 2");
+
+ assert.eq("allan,bob", nameString(db.foo.find().sort({name: 1}).limit(2)), "LSD1");
+ assert.eq("bob,eliot", nameString(db.foo.find().sort({name: 1}).skip(1).limit(2)), "LSD2");
+ assert.eq("joe,mark", nameString(db.foo.find().sort({name: 1}).skip(3).limit(2)), "LSD3");
+
+ assert.eq("eliot,sara", nameString(db.foo.find().sort({_id: 1}).limit(2)), "LSE1");
+ assert.eq("sara,bob", nameString(db.foo.find().sort({_id: 1}).skip(1).limit(2)), "LSE2");
+ assert.eq("joe,mark", nameString(db.foo.find().sort({_id: 1}).skip(3).limit(2)), "LSE3");
+
+ // part 6
+ for (i = 0; i < 10; i++) {
+ db.foo.save({_id: 7 + i, name: "zzz" + i});
+ }
+
+ assert.eq(10, db.foo.find({name: {$gt: "z"}}).itcount(), "LSF1");
+ assert.eq(10, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).itcount(), "LSF2");
+ assert.eq(5, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).itcount(), "LSF3");
+ assert.eq(3, db.foo.find({name: {$gt: "z"}}).sort({_id: 1}).skip(5).limit(3).itcount(), "LSF4");
+
+ // part 7
+ // Make sure count command returns error for invalid queries
+ var badCmdResult = db.runCommand({count: 'foo', query: {$c: {$abc: 3}}});
+ assert(!badCmdResult.ok, "invalid query syntax didn't return error");
+ assert(badCmdResult.errmsg.length > 0, "no error msg for invalid query");
+
+ // Negative skip values should return error
+ var negSkipResult = db.runCommand({count: 'foo', skip: -2});
+ assert(!negSkipResult.ok, "negative skip value shouldn't work");
+ assert(negSkipResult.errmsg.length > 0, "no error msg for negative skip");
+
+ // Negative skip values with positive limit should return error
+ var negSkipLimitResult = db.runCommand({count: 'foo', skip: -2, limit: 1});
+ assert(!negSkipLimitResult.ok, "negative skip value with limit shouldn't work");
+ assert(negSkipLimitResult.errmsg.length > 0, "no error msg for negative skip");
+
+ s.stop();
})();
diff --git a/jstests/sharding/count2.js b/jstests/sharding/count2.js
index cbef67a2db9..8b1346fd0d4 100644
--- a/jstests/sharding/count2.js
+++ b/jstests/sharding/count2.js
@@ -1,56 +1,56 @@
(function() {
-var s1 = new ShardingTest({ name: "count2",
- shards: 2,
- mongos: 2 });
-var s2 = s1._mongos[1];
+ var s1 = new ShardingTest({name: "count2", shards: 2, mongos: 2});
+ var s2 = s1._mongos[1];
-s1.adminCommand( { enablesharding: "test" } );
-s1.ensurePrimaryShard('test', 'shard0001');
-s1.adminCommand( { shardcollection: "test.foo" , key : { name : 1 } } );
+ s1.adminCommand({enablesharding: "test"});
+ s1.ensurePrimaryShard('test', 'shard0001');
+ s1.adminCommand({shardcollection: "test.foo", key: {name: 1}});
-var db1 = s1.getDB( "test" ).foo;
-var db2 = s2.getDB( "test" ).foo;
+ var db1 = s1.getDB("test").foo;
+ var db2 = s2.getDB("test").foo;
-assert.eq( 1, s1.config.chunks.count(), "sanity check A");
+ assert.eq(1, s1.config.chunks.count(), "sanity check A");
-db1.save( { name : "aaa" } );
-db1.save( { name : "bbb" } );
-db1.save( { name : "ccc" } );
-db1.save( { name : "ddd" } );
-db1.save( { name : "eee" } );
-db1.save( { name : "fff" } );
+ db1.save({name: "aaa"});
+ db1.save({name: "bbb"});
+ db1.save({name: "ccc"});
+ db1.save({name: "ddd"});
+ db1.save({name: "eee"});
+ db1.save({name: "fff"});
-s1.adminCommand( { split : "test.foo" , middle : { name : "ddd" } } );
+ s1.adminCommand({split: "test.foo", middle: {name: "ddd"}});
-assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos1" );
-assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "initial count mongos2" );
+ assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos1");
+ assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}), "initial count mongos2");
-s1.printChunks( "test.foo" );
+ s1.printChunks("test.foo");
-s1.adminCommand( { movechunk : "test.foo",
- find : { name : "aaa" },
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true });
+ s1.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "aaa"},
+ to: s1.getOther(s1.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
-assert.eq( 3, db1.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) , "post count mongos1" );
+ assert.eq(3, db1.count({name: {$gte: "aaa", $lt: "ddd"}}), "post count mongos1");
-// The second mongos still thinks its shard mapping is valid and accepts a count
-print( "before sleep: " + Date() );
-sleep( 2000 );
-print( "after sleep: " + Date() );
-s1.printChunks( "test.foo" );
-assert.eq( 3, db2.find( { name : { $gte: "aaa" , $lt: "ddd" } } ).count() , "post count mongos2" );
+    // The second mongos still thinks its shard mapping is valid and accepts a count
+ print("before sleep: " + Date());
+ sleep(2000);
+ print("after sleep: " + Date());
+ s1.printChunks("test.foo");
+ assert.eq(3, db2.find({name: {$gte: "aaa", $lt: "ddd"}}).count(), "post count mongos2");
-db2.findOne();
+ db2.findOne();
-assert.eq( 3, db2.count( { name : { $gte: "aaa" , $lt: "ddd" } } ) );
+ assert.eq(3, db2.count({name: {$gte: "aaa", $lt: "ddd"}}));
-assert.eq( 4, db2.find().limit( 4 ).count( true ));
-assert.eq( 4, db2.find().limit( -4 ).count( true ));
-assert.eq( 6, db2.find().limit( 0 ).count( true ));
-assert.eq( 6, db2.getDB().runCommand({ count: db2.getName(), limit: 0 }).n );
+ assert.eq(4, db2.find().limit(4).count(true));
+ assert.eq(4, db2.find().limit(-4).count(true));
+ assert.eq(6, db2.find().limit(0).count(true));
+ assert.eq(6, db2.getDB().runCommand({count: db2.getName(), limit: 0}).n);
-s1.stop();
+ s1.stop();
})();
diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js
index 86517073336..ed8bf19cf10 100644
--- a/jstests/sharding/count_config_servers.js
+++ b/jstests/sharding/count_config_servers.js
@@ -3,62 +3,62 @@
* This test fails when run with authentication due to SERVER-6327
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
-st.s.setSlaveOk(true);
+ var st = new ShardingTest({name: 'sync_conn_cmd', shards: 0});
+ st.s.setSlaveOk(true);
-var configDB = st.config;
-var coll = configDB.test;
+ var configDB = st.config;
+ var coll = configDB.test;
-for( var x = 0; x < 10; x++ ){
- assert.writeOK(coll.insert({ v: x }));
-}
+ for (var x = 0; x < 10; x++) {
+ assert.writeOK(coll.insert({v: x}));
+ }
-if (st.configRS) {
- // Make sure the inserts are replicated to all config servers.
- st.configRS.awaitReplication();
-}
+ if (st.configRS) {
+ // Make sure the inserts are replicated to all config servers.
+ st.configRS.awaitReplication();
+ }
-var testNormalCount = function(){
- var cmdRes = configDB.runCommand({ count: coll.getName() });
- assert( cmdRes.ok );
- assert.eq( 10, cmdRes.n );
-};
+ var testNormalCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName()});
+ assert(cmdRes.ok);
+ assert.eq(10, cmdRes.n);
+ };
-var testCountWithQuery = function(){
- var cmdRes = configDB.runCommand({ count: coll.getName(), query: { v: { $gt: 6 }}});
- assert( cmdRes.ok );
- assert.eq( 3, cmdRes.n );
-};
+ var testCountWithQuery = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {v: {$gt: 6}}});
+ assert(cmdRes.ok);
+ assert.eq(3, cmdRes.n);
+ };
-// Use invalid query operator to make the count return error
-var testInvalidCount = function(){
- var cmdRes = configDB.runCommand({ count: coll.getName(), query: { $c: { $abc: 3 }}});
- assert( !cmdRes.ok );
- assert( cmdRes.errmsg.length > 0 );
-};
+ // Use invalid query operator to make the count return error
+ var testInvalidCount = function() {
+ var cmdRes = configDB.runCommand({count: coll.getName(), query: {$c: {$abc: 3}}});
+ assert(!cmdRes.ok);
+ assert(cmdRes.errmsg.length > 0);
+ };
-// Test with all config servers up
-testNormalCount();
-testCountWithQuery();
-testInvalidCount();
+ // Test with all config servers up
+ testNormalCount();
+ testCountWithQuery();
+ testInvalidCount();
-// Test with the first config server down
-MongoRunner.stopMongod(st.c0);
+ // Test with the first config server down
+ MongoRunner.stopMongod(st.c0);
-testNormalCount();
-testCountWithQuery();
-testInvalidCount();
+ testNormalCount();
+ testCountWithQuery();
+ testInvalidCount();
-// Test with the first and second config server down
-MongoRunner.stopMongod(st.c1);
-jsTest.log( 'Second server is down' );
+ // Test with the first and second config server down
+ MongoRunner.stopMongod(st.c1);
+ jsTest.log('Second server is down');
-testNormalCount();
-testCountWithQuery();
-testInvalidCount();
+ testNormalCount();
+ testCountWithQuery();
+ testInvalidCount();
-st.stop();
+ st.stop();
}());
diff --git a/jstests/sharding/count_slaveok.js b/jstests/sharding/count_slaveok.js
index eaf39a18352..70f0d7091d9 100644
--- a/jstests/sharding/count_slaveok.js
+++ b/jstests/sharding/count_slaveok.js
@@ -1,74 +1,70 @@
// Tests count and distinct using slaveOk. Also tests a scenario querying a set where only one
// secondary is up.
(function() {
-'use strict';
-
-var st = new ShardingTest({ name: "countSlaveOk",
- shards: 1,
- mongos: 1,
- other: { rs: true,
- rs0: { nodes: 2 } } });
-
-var rst = st._rs[0].test;
-
-// Insert data into replica set
-var conn = new Mongo(st.s.host);
-conn.setLogLevel(3);
-
-var coll = conn.getCollection('test.countSlaveOk');
-coll.drop();
-
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 300; i++ ){
- bulk.insert({ i: i % 10 });
-}
-assert.writeOK(bulk.execute());
-
-var connA = conn;
-var connB = new Mongo( st.s.host );
-var connC = new Mongo( st.s.host );
-
-st.printShardingStatus();
-
-// Wait for client to update itself and replication to finish
-rst.awaitReplication();
-
-var primary = rst.getPrimary();
-var sec = rst.getSecondary();
-
-// Data now inserted... stop the master; with only two nodes in the set, the other will remain a secondary
-rst.stop(rst.getPrimary());
-printjson( rst.status() );
-
-// Wait for the mongos to recognize the slave
-ReplSetTest.awaitRSClientHosts( conn, sec, { ok : true, secondary : true } );
-
-// Make sure that mongos realizes that primary is already down
-ReplSetTest.awaitRSClientHosts( conn, primary, { ok : false });
-
-// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
-// master is down
-conn.setSlaveOk();
-
-// count using the command path
-assert.eq( 30, coll.find({ i : 0 }).count() );
-// count using the query path
-assert.eq( 30, coll.find({ i : 0 }).itcount() );
-assert.eq( 10, coll.distinct("i").length );
-
-try {
- conn.setSlaveOk( false );
- // Should throw exception, since not slaveOk'd
- coll.find({ i : 0 }).count();
-
- print( "Should not reach here!" );
- assert( false );
-
-}
-catch( e ){
- print( "Non-slaveOk'd connection failed." );
-}
-
-st.stop();
+ 'use strict';
+
+ var st = new ShardingTest(
+ {name: "countSlaveOk", shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
+
+ var rst = st._rs[0].test;
+
+ // Insert data into replica set
+ var conn = new Mongo(st.s.host);
+ conn.setLogLevel(3);
+
+ var coll = conn.getCollection('test.countSlaveOk');
+ coll.drop();
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10});
+ }
+ assert.writeOK(bulk.execute());
+
+ var connA = conn;
+ var connB = new Mongo(st.s.host);
+ var connC = new Mongo(st.s.host);
+
+ st.printShardingStatus();
+
+ // Wait for client to update itself and replication to finish
+ rst.awaitReplication();
+
+ var primary = rst.getPrimary();
+ var sec = rst.getSecondary();
+
+    // Data now inserted... stop the master; with only two nodes in the set, the other will remain a secondary
+ rst.stop(rst.getPrimary());
+ printjson(rst.status());
+
+ // Wait for the mongos to recognize the slave
+ ReplSetTest.awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
+
+ // Make sure that mongos realizes that primary is already down
+ ReplSetTest.awaitRSClientHosts(conn, primary, {ok: false});
+
+ // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+ // master is down
+ conn.setSlaveOk();
+
+ // count using the command path
+ assert.eq(30, coll.find({i: 0}).count());
+ // count using the query path
+ assert.eq(30, coll.find({i: 0}).itcount());
+ assert.eq(10, coll.distinct("i").length);
+
+ try {
+ conn.setSlaveOk(false);
+ // Should throw exception, since not slaveOk'd
+ coll.find({i: 0}).count();
+
+ print("Should not reach here!");
+ assert(false);
+
+ } catch (e) {
+ print("Non-slaveOk'd connection failed.");
+ }
+
+ st.stop();
})();
diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js
index e5cd1ce93b0..307dc241d9f 100644
--- a/jstests/sharding/covered_shard_key_indexes.js
+++ b/jstests/sharding/covered_shard_key_indexes.js
@@ -6,147 +6,155 @@
// Include helpers for analyzing explain output.
load("jstests/libs/analyze_plan.js");
-var st = new ShardingTest({ shards : 1 });
+var st = new ShardingTest({shards: 1});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var coll = mongos.getCollection("foo.bar");
//
//
// Tests with _id : 1 shard key
-assert(admin.runCommand({ enableSharding : coll.getDB() + "" }).ok);
-printjson(admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }));
-assert(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 }}).ok);
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : true, b : true }));
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
-assert.commandWorked(st.shard0.adminCommand({ setParameter: 1,
- logComponentVerbosity: { query: { verbosity: 5 }}}));
+assert.commandWorked(
+ st.shard0.adminCommand({setParameter: 1, logComponentVerbosity: {query: {verbosity: 5}}}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ a : 1 }));
-assert.eq(1, coll.find({ a : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ a : true }, { _id : 1, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
//
// Index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndexes());
-assert.commandWorked(coll.ensureIndex({ a : 1, _id : 1 }));
-assert.eq(1, coll.find({ a : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ a : true }, { _id : 1, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1, _id: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0, coll.find({a: true}, {_id: 1, a: 1}).explain(true).executionStats.totalDocsExamined);
//
// Compound index with shard key query - covered when projecting
assert.commandWorked(coll.dropIndexes());
-assert.commandWorked(coll.ensureIndex({ a : 1, b : 1, _id : 1 }));
-assert.eq(1, coll.find({ a : true, b : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ a : true, b : true }, { _id : 1, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({a: true, b: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({a: true, b: true}, {_id: 1, a: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
//
// Tests with _id : hashed shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { _id : "hashed" }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: "hashed"}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : true, b : true }));
+assert.writeOK(coll.insert({_id: true, a: true, b: true}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ a : 1 }));
-assert.eq(1, coll.find({ a : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ a : true }, { _id : 0, a : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({a: 1}));
+assert.eq(1, coll.find({a: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({a: true}, {_id: 0, a: 1}).explain(true).executionStats.totalDocsExamined);
//
// Index with shard key query - can't be covered since hashed index
-assert.commandWorked(coll.dropIndex({ a : 1 }));
-assert.eq(1, coll.find({ _id : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ _id : true }, { _id : 0 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({a: 1}));
+assert.eq(1, coll.find({_id: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1, coll.find({_id: true}, {_id: 0}).explain(true).executionStats.totalDocsExamined);
//
//
// Tests with compound shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { a : 1, b : 1 }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {a: 1, b: 1}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : true, b : true, c : true, d : true }));
+assert.writeOK(coll.insert({_id: true, a: true, b: true, c: true, d: true}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ c : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ c : true }, { _id : 0, a : 1, b : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
// Index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndex({ c : 1 }));
-assert.commandWorked(coll.ensureIndex({ c : 1, b : 1, a : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ c : true }, { _id : 0, a : 1, b : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, b: 1, a: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true}, {_id: 0, a: 1, b: 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
// Compound index with shard key query - covered when projecting
-assert.commandWorked(coll.dropIndex({ c : 1, b : 1, a : 1 }));
-assert.commandWorked(coll.ensureIndex({ c : 1, d : 1, a : 1, b : 1, _id : 1 }));
-assert.eq(1, coll.find({ c : true, d : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(0, coll.find({ c : true, d : true }, { a : 1, b : 1, c : 1, d : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({c: 1, b: 1, a: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, d: 1, a: 1, b: 1, _id: 1}));
+assert.eq(1, coll.find({c: true, d: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(0,
+ coll.find({c: true, d: true}, {a: 1, b: 1, c: 1, d: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
//
// Tests with nested shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { 'a.b' : 1 }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {'a.b': 1}}).ok);
st.printShardingStatus();
// Insert some data
-assert.writeOK(coll.insert({ _id : true, a : { b : true }, c : true }));
+assert.writeOK(coll.insert({_id: true, a: {b: true}, c: true}));
//
// Index without shard key query - not covered
-assert.commandWorked(coll.ensureIndex({ c : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ c : true }, { _id : 0, 'a.b' : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.ensureIndex({c: 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
// Index with shard key query - nested query not covered even when projecting
-assert.commandWorked(coll.dropIndex({ c : 1 }));
-assert.commandWorked(coll.ensureIndex({ c : 1, 'a.b' : 1 }));
-assert.eq(1, coll.find({ c : true }).explain(true).executionStats.totalDocsExamined);
-assert.eq(1, coll.find({ c : true }, { _id : 0, 'a.b' : 1, c : 1 })
- .explain(true).executionStats.totalDocsExamined);
+assert.commandWorked(coll.dropIndex({c: 1}));
+assert.commandWorked(coll.ensureIndex({c: 1, 'a.b': 1}));
+assert.eq(1, coll.find({c: true}).explain(true).executionStats.totalDocsExamined);
+assert.eq(1,
+ coll.find({c: true}, {_id: 0, 'a.b': 1, c: 1})
+ .explain(true)
+ .executionStats.totalDocsExamined);
//
//
// Tests with bad data with no shard key
coll.drop();
-assert(admin.runCommand({ shardCollection : coll + "", key : { a : 1 }}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {a: 1}}).ok);
st.printShardingStatus();
// Insert some bad data manually
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : "bad data", c : true }));
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: "bad data", c: true}));
//
// Index without shard key query - not covered but succeeds
-assert.commandWorked(coll.ensureIndex({ c : 1 }));
-var explain = coll.find({ c : true }).explain(true).executionStats;
+assert.commandWorked(coll.ensureIndex({c: 1}));
+var explain = coll.find({c: true}).explain(true).executionStats;
assert.eq(0, explain.nReturned);
assert.eq(1, explain.totalDocsExamined);
assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
@@ -155,9 +163,9 @@ assert.eq(1, getChunkSkips(explain.executionStages.shards[0].executionStages));
// Index with shard key query - covered and succeeds and returns result
// NOTE: This is weird and only a result of the fact that we don't have a dedicated "does not exist"
// value for indexes
-assert.commandWorked(coll.ensureIndex({ c : 1, a : 1 }));
-jsTest.log(tojson(coll.find({ c : true }, { _id : 0, a : 1, c : 1 }).toArray()));
-var explain = coll.find({ c : true }, { _id : 0, a : 1, c : 1 }).explain(true).executionStats;
+assert.commandWorked(coll.ensureIndex({c: 1, a: 1}));
+jsTest.log(tojson(coll.find({c: true}, {_id: 0, a: 1, c: 1}).toArray()));
+var explain = coll.find({c: true}, {_id: 0, a: 1, c: 1}).explain(true).executionStats;
assert.eq(1, explain.nReturned);
assert.eq(0, explain.totalDocsExamined);
assert.eq(0, getChunkSkips(explain.executionStages.shards[0].executionStages));
diff --git a/jstests/sharding/create_idx_empty_primary.js b/jstests/sharding/create_idx_empty_primary.js
index b6eeacb8cd1..f8beffa7e52 100644
--- a/jstests/sharding/create_idx_empty_primary.js
+++ b/jstests/sharding/create_idx_empty_primary.js
@@ -2,34 +2,33 @@
* Test to make sure that the createIndex command gets sent to all shards.
*/
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 2 });
-assert.commandWorked(st.s.adminCommand({ enablesharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0001');
+ var st = new ShardingTest({shards: 2});
+ assert.commandWorked(st.s.adminCommand({enablesharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0001');
-var testDB = st.s.getDB('test');
-assert.commandWorked(testDB.adminCommand({ shardcollection: 'test.user', key: { _id: 1 }}));
+ var testDB = st.s.getDB('test');
+ assert.commandWorked(testDB.adminCommand({shardcollection: 'test.user', key: {_id: 1}}));
-// Move only chunk out of primary shard.
-assert.commandWorked(testDB.adminCommand({ movechunk: 'test.user',
- find: { _id: 0 },
- to: 'shard0000' }));
+ // Move only chunk out of primary shard.
+ assert.commandWorked(
+ testDB.adminCommand({movechunk: 'test.user', find: {_id: 0}, to: 'shard0000'}));
-assert.writeOK(testDB.user.insert({ _id: 0 }));
+ assert.writeOK(testDB.user.insert({_id: 0}));
-var res = testDB.user.ensureIndex({ i: 1 });
-assert.commandWorked(res);
+ var res = testDB.user.ensureIndex({i: 1});
+ assert.commandWorked(res);
-var indexes = testDB.user.getIndexes();
-assert.eq(2, indexes.length);
+ var indexes = testDB.user.getIndexes();
+ assert.eq(2, indexes.length);
-indexes = st.d0.getDB('test').user.getIndexes();
-assert.eq(2, indexes.length);
+ indexes = st.d0.getDB('test').user.getIndexes();
+ assert.eq(2, indexes.length);
-indexes = st.d1.getDB('test').user.getIndexes();
-assert.eq(2, indexes.length);
+ indexes = st.d1.getDB('test').user.getIndexes();
+ assert.eq(2, indexes.length);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js
index 8b799ce1aa6..3aee9ff2cb5 100644
--- a/jstests/sharding/cursor1.js
+++ b/jstests/sharding/cursor1.js
@@ -2,70 +2,71 @@
// checks that cursors survive a chunk's move
(function() {
-var s = new ShardingTest({ name: "sharding_cursor1", shards: 2 });
-s.config.settings.find().forEach( printjson );
+ var s = new ShardingTest({name: "sharding_cursor1", shards: 2});
+ s.config.settings.find().forEach(printjson);
-// create a sharded 'test.foo', for the moment with just one chunk
-s.adminCommand( { enablesharding: "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection: "test.foo", key: { _id: 1 } } );
+ // create a sharded 'test.foo', for the moment with just one chunk
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-db = s.getDB( "test" );
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+ db = s.getDB("test");
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
-var numObjs = 30;
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (i=0; i < numObjs; i++){
- bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
-assert.eq( 1, s.config.chunks.count() , "test requires collection to have one chunk initially" );
+ var numObjs = 30;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.eq(1, s.config.chunks.count(), "test requires collection to have one chunk initially");
-// we'll split the collection in two and move the second chunk while three cursors are open
-// cursor1 still has more data in the first chunk, the one that didn't move
-// cursor2 buffered the last obj of the first chunk
-// cursor3 buffered data that was moved on the second chunk
-var cursor1 = db.foo.find().batchSize( 3 );
-assert.eq( 3 , cursor1.objsLeftInBatch() );
-var cursor2 = db.foo.find().batchSize( 5 );
-assert.eq( 5 , cursor2.objsLeftInBatch() );
-var cursor3 = db.foo.find().batchSize( 7 );
-assert.eq( 7 , cursor3.objsLeftInBatch() );
+ // we'll split the collection in two and move the second chunk while three cursors are open
+ // cursor1 still has more data in the first chunk, the one that didn't move
+ // cursor2 buffered the last obj of the first chunk
+ // cursor3 buffered data that was moved on the second chunk
+ var cursor1 = db.foo.find().batchSize(3);
+ assert.eq(3, cursor1.objsLeftInBatch());
+ var cursor2 = db.foo.find().batchSize(5);
+ assert.eq(5, cursor2.objsLeftInBatch());
+ var cursor3 = db.foo.find().batchSize(7);
+ assert.eq(7, cursor3.objsLeftInBatch());
-s.adminCommand( { split: "test.foo" , middle : { _id : 5 } } );
-s.adminCommand( { movechunk : "test.foo" , find : { _id : 5 } , to : secondary.getMongo().name } );
-assert.eq( 2, s.config.chunks.count() );
+ s.adminCommand({split: "test.foo", middle: {_id: 5}});
+ s.adminCommand({movechunk: "test.foo", find: {_id: 5}, to: secondary.getMongo().name});
+ assert.eq(2, s.config.chunks.count());
-// the cursors should not have been affected
-assert.eq( numObjs , cursor1.itcount() , "c1" );
-assert.eq( numObjs , cursor2.itcount() , "c2" );
-assert.eq( numObjs , cursor3.itcount() , "c3" );
+ // the cursors should not have been affected
+ assert.eq(numObjs, cursor1.itcount(), "c1");
+ assert.eq(numObjs, cursor2.itcount(), "c2");
+ assert.eq(numObjs, cursor3.itcount(), "c3");
-// Test that a cursor with a 1 second timeout eventually times out.
-gc(); gc();
-var cur = db.foo.find().batchSize( 2 );
-assert( cur.next() , "T1" );
-assert( cur.next() , "T2" );
-assert.commandWorked(s.admin.runCommand({
- setParameter: 1,
- cursorTimeoutMillis: 1000 // 1 second.
-}));
+ // Test that a cursor with a 1 second timeout eventually times out.
+ gc();
+ gc();
+ var cur = db.foo.find().batchSize(2);
+ assert(cur.next(), "T1");
+ assert(cur.next(), "T2");
+ assert.commandWorked(s.admin.runCommand({
+ setParameter: 1,
+ cursorTimeoutMillis: 1000 // 1 second.
+ }));
-assert.soon(function() {
- try {
- cur.next();
- cur.next();
- print("cursor still alive");
- return false;
- }
- catch (e) {
- return true;
- }
-}, "cursor failed to time out", /*timeout*/30000, /*interval*/5000);
+ assert.soon(function() {
+ try {
+ cur.next();
+ cur.next();
+ print("cursor still alive");
+ return false;
+ } catch (e) {
+ return true;
+ }
+ }, "cursor failed to time out", /*timeout*/ 30000, /*interval*/ 5000);
-gc(); gc();
+ gc();
+ gc();
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/cursor_cleanup.js b/jstests/sharding/cursor_cleanup.js
index 4eb200b87e3..5d0ce46f532 100644
--- a/jstests/sharding/cursor_cleanup.js
+++ b/jstests/sharding/cursor_cleanup.js
@@ -2,21 +2,21 @@
// Tests cleanup of sharded and unsharded cursors
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
var shards = config.shards.find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
-var collUnsharded = mongos.getCollection( "foo.baz" );
+var coll = mongos.getCollection("foo.bar");
+var collUnsharded = mongos.getCollection("foo.baz");
// Shard collection
-printjson(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-printjson(admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }));
-printjson(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
-printjson(admin.runCommand({ split : coll + "", middle : { _id : 0 } }));
-printjson(admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : shards[1]._id }));
+printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
+printjson(admin.runCommand({split: coll + "", middle: {_id: 0}}));
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
jsTest.log("Collection set up...");
st.printShardingStatus(true);
@@ -26,8 +26,8 @@ jsTest.log("Insert enough data to overwhelm a query batch.");
var bulk = coll.initializeUnorderedBulkOp();
var bulk2 = collUnsharded.initializeUnorderedBulkOp();
for (var i = -150; i < 150; i++) {
- bulk.insert({ _id : i });
- bulk2.insert({ _id : i });
+ bulk.insert({_id: i});
+ bulk2.insert({_id: i});
}
assert.writeOK(bulk.execute());
assert.writeOK(bulk2.execute());
diff --git a/jstests/sharding/delete_during_migrate.js b/jstests/sharding/delete_during_migrate.js
index e44b4cd4078..982b0c00787 100644
--- a/jstests/sharding/delete_during_migrate.js
+++ b/jstests/sharding/delete_during_migrate.js
@@ -3,42 +3,44 @@
// starts. Protect against that by making chunk very large.
// start up a new sharded cluster
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
// Balancer is by default stopped, thus we have manual control
var dbname = "testDB";
var coll = "foo";
var ns = dbname + "." + coll;
var s = st.s0;
-var t = s.getDB( dbname ).getCollection( coll );
+var t = s.getDB(dbname).getCollection(coll);
-s.adminCommand({ enablesharding: dbname });
+s.adminCommand({enablesharding: dbname});
st.ensurePrimaryShard(dbname, 'shard0001');
// Create fresh collection with lots of docs
t.drop();
var bulk = t.initializeUnorderedBulkOp();
for (var i = 0; i < 200000; i++) {
- bulk.insert({ a: i });
+ bulk.insert({a: i});
}
assert.writeOK(bulk.execute());
// enable sharding of the collection. Only 1 chunk.
-t.ensureIndex( { a : 1 } );
-s.adminCommand( { shardcollection : ns , key: { a : 1 } } );
+t.ensureIndex({a: 1});
+s.adminCommand({shardcollection: ns, key: {a: 1}});
// start a parallel shell that deletes things
-startMongoProgramNoConnect( "mongo" ,
- "--host" , getHostName() ,
- "--port" , st.s0.port ,
- "--eval" , "db." + coll + ".remove({});" ,
- dbname );
+startMongoProgramNoConnect("mongo",
+ "--host",
+ getHostName(),
+ "--port",
+ st.s0.port,
+ "--eval",
+ "db." + coll + ".remove({});",
+ dbname);
// migrate while deletions are happening
-var moveResult = s.adminCommand( { moveChunk : ns ,
- find : { a : 1 } ,
- to : st.getOther( st.getPrimaryShard( dbname ) ).name } );
+var moveResult = s.adminCommand(
+ {moveChunk: ns, find: {a: 1}, to: st.getOther(st.getPrimaryShard(dbname)).name});
// check if migration worked
-assert( moveResult.ok , "migration didn't work while doing deletes" );
+assert(moveResult.ok, "migration didn't work while doing deletes");
st.stop();
diff --git a/jstests/sharding/diffservers1.js b/jstests/sharding/diffservers1.js
index 9b8500f01d1..a8f5469ed2f 100644
--- a/jstests/sharding/diffservers1.js
+++ b/jstests/sharding/diffservers1.js
@@ -1,24 +1,25 @@
(function() {
-var s = new ShardingTest({ name: "diffservers1", shards: 2 });
+ var s = new ShardingTest({name: "diffservers1", shards: 2});
-assert.eq( 2 , s.config.shards.count() , "server count wrong" );
-assert.eq( 0 , s._connections[0].getDB( "config" ).shards.count() , "shouldn't be here" );
-assert.eq( 0 , s._connections[1].getDB( "config" ).shards.count() , "shouldn't be here" );
+ assert.eq(2, s.config.shards.count(), "server count wrong");
+ assert.eq(0, s._connections[0].getDB("config").shards.count(), "shouldn't be here");
+ assert.eq(0, s._connections[1].getDB("config").shards.count(), "shouldn't be here");
-test1 = s.getDB( "test1" ).foo;
-test1.save( { a : 1 } );
-test1.save( { a : 2 } );
-test1.save( { a : 3 } );
-assert.eq( 3 , test1.count() );
+ test1 = s.getDB("test1").foo;
+ test1.save({a: 1});
+ test1.save({a: 2});
+ test1.save({a: 3});
+ assert.eq(3, test1.count());
-assert( ! s.admin.runCommand( { addshard: "sdd$%" } ).ok , "bad hostname" );
+ assert(!s.admin.runCommand({addshard: "sdd$%"}).ok, "bad hostname");
-var portWithoutHostRunning = allocatePort();
-assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok, "host not up");
-assert(!s.admin.runCommand({ addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
- "allowed shard in IP when config is localhost" );
+ var portWithoutHostRunning = allocatePort();
+ assert(!s.admin.runCommand({addshard: "127.0.0.1:" + portWithoutHostRunning}).ok,
+ "host not up");
+ assert(!s.admin.runCommand({addshard: "10.0.0.1:" + portWithoutHostRunning}).ok,
+ "allowed shard in IP when config is localhost");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/disable_autosplit.js b/jstests/sharding/disable_autosplit.js
index ec3b593ea24..c6b7b7d5e1f 100644
--- a/jstests/sharding/disable_autosplit.js
+++ b/jstests/sharding/disable_autosplit.js
@@ -1,34 +1,32 @@
// Tests disabling of autosplit from mongos
(function() {
-'use strict';
+ 'use strict';
-var chunkSize = 1; // In MB
+ var chunkSize = 1; // In MB
-var st = new ShardingTest({ shards: 1,
- mongos: 1,
- other: { chunksize: chunkSize,
- mongosOptions: { noAutoSplit: "" } } });
+ var st = new ShardingTest(
+ {shards: 1, mongos: 1, other: {chunksize: chunkSize, mongosOptions: {noAutoSplit: ""}}});
-var data = "x";
-while(data.length < chunkSize * 1024 * 1024) {
- data += data;
-}
+ var data = "x";
+ while (data.length < chunkSize * 1024 * 1024) {
+ data += data;
+ }
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
-assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-for(var i = 0; i < 20; i++) {
- coll.insert({ data: data });
-}
+ for (var i = 0; i < 20; i++) {
+ coll.insert({data: data});
+ }
-// Make sure we haven't split
-assert.eq(1, config.chunks.find({ ns: coll + "" }).count());
+ // Make sure we haven't split
+ assert.eq(1, config.chunks.find({ns: coll + ""}).count());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/drop_configdb.js b/jstests/sharding/drop_configdb.js
index 8ac87648dfa..63a3b533597 100644
--- a/jstests/sharding/drop_configdb.js
+++ b/jstests/sharding/drop_configdb.js
@@ -1,35 +1,35 @@
// Test that dropping the config database is completely disabled via
// mongos and via mongod, if started with --configsvr
(function() {
-"use strict";
+ "use strict";
-var getConfigsvrToWriteTo = function(st) {
- if (st.configRS) {
- return st.configRS.getPrimary();
- } else {
- return st._configServers[0];
- }
-};
+ var getConfigsvrToWriteTo = function(st) {
+ if (st.configRS) {
+ return st.configRS.getPrimary();
+ } else {
+ return st._configServers[0];
+ }
+ };
-var st = new ShardingTest({ shards : 2 });
-var mongos = st.s;
-var config = getConfigsvrToWriteTo(st).getDB('config');
+ var st = new ShardingTest({shards: 2});
+ var mongos = st.s;
+ var config = getConfigsvrToWriteTo(st).getDB('config');
-// Try to drop config db via configsvr
+ // Try to drop config db via configsvr
-print ( "1: Try to drop config database via configsvr" );
-assert.eq(0, config.dropDatabase().ok);
-assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
- config.dropDatabase().errmsg);
+ print("1: Try to drop config database via configsvr");
+ assert.eq(0, config.dropDatabase().ok);
+ assert.eq("Cannot drop 'config' database if mongod started with --configsvr",
+ config.dropDatabase().errmsg);
-// Try to drop config db via mongos
-var config = mongos.getDB( "config" );
+ // Try to drop config db via mongos
+ var config = mongos.getDB("config");
-print ( "1: Try to drop config database via mongos" );
-assert.eq(0, config.dropDatabase().ok);
+ print("1: Try to drop config database via mongos");
+ assert.eq(0, config.dropDatabase().ok);
-// 20 = ErrorCodes::IllegalOperation
-assert.eq(20, config.dropDatabase().code);
+ // 20 = ErrorCodes::IllegalOperation
+ assert.eq(20, config.dropDatabase().code);
-st.stop();
+ st.stop();
}()); \ No newline at end of file
diff --git a/jstests/sharding/drop_sharded_db.js b/jstests/sharding/drop_sharded_db.js
index 1c3e95460c2..962ff84fc40 100644
--- a/jstests/sharding/drop_sharded_db.js
+++ b/jstests/sharding/drop_sharded_db.js
@@ -1,66 +1,69 @@
// Tests the dropping of a sharded database SERVER-3471 SERVER-1726
(function() {
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var mongos = st.s0;
-var config = mongos.getDB("config");
+ var mongos = st.s0;
+ var config = mongos.getDB("config");
-var dbA = mongos.getDB("DropSharded_A");
-var dbB = mongos.getDB("DropSharded_B");
-var dbC = mongos.getDB("DropSharded_C");
+ var dbA = mongos.getDB("DropSharded_A");
+ var dbB = mongos.getDB("DropSharded_B");
+ var dbC = mongos.getDB("DropSharded_C");
-// Dropping a database that doesn't exist will result in an info field in the response.
-var res = assert.commandWorked(dbA.dropDatabase());
-assert.eq('database does not exist', res.info);
+ // Dropping a database that doesn't exist will result in an info field in the response.
+ var res = assert.commandWorked(dbA.dropDatabase());
+ assert.eq('database does not exist', res.info);
-var numDocs = 3000;
-var numColls = 10;
-for (var i = 0; i < numDocs; i++) {
- dbA.getCollection("data" + (i % numColls)).insert({ _id: i });
- dbB.getCollection("data" + (i % numColls)).insert({ _id: i });
- dbC.getCollection("data" + (i % numColls)).insert({ _id: i });
-}
+ var numDocs = 3000;
+ var numColls = 10;
+ for (var i = 0; i < numDocs; i++) {
+ dbA.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbB.getCollection("data" + (i % numColls)).insert({_id: i});
+ dbC.getCollection("data" + (i % numColls)).insert({_id: i});
+ }
-var key = { _id: 1 };
-for (var i = 0; i < numColls; i++) {
- st.shardColl(dbA.getCollection("data" + i), key);
- st.shardColl(dbB.getCollection("data" + i), key);
- st.shardColl(dbC.getCollection("data" + i), key);
-}
+ var key = {
+ _id: 1
+ };
+ for (var i = 0; i < numColls; i++) {
+ st.shardColl(dbA.getCollection("data" + i), key);
+ st.shardColl(dbB.getCollection("data" + i), key);
+ st.shardColl(dbC.getCollection("data" + i), key);
+ }
-// Insert a document to an unsharded collection and make sure that the document is there.
-assert.writeOK(dbA.unsharded.insert({ dummy: 1 }));
-var shardName = config.databases.findOne({ _id: dbA.getName() }).primary;
-var shardHostConn = new Mongo(config.shards.findOne({ _id: shardName }).host);
-var dbAOnShard = shardHostConn.getDB(dbA.getName());
-assert.neq(null, dbAOnShard.unsharded.findOne({ dummy: 1 }));
+ // Insert a document to an unsharded collection and make sure that the document is there.
+ assert.writeOK(dbA.unsharded.insert({dummy: 1}));
+ var shardName = config.databases.findOne({_id: dbA.getName()}).primary;
+ var shardHostConn = new Mongo(config.shards.findOne({_id: shardName}).host);
+ var dbAOnShard = shardHostConn.getDB(dbA.getName());
+ assert.neq(null, dbAOnShard.unsharded.findOne({dummy: 1}));
-// Drop the non-suffixed db and ensure that it is the only one that was dropped.
-dbA.dropDatabase();
-var dbs = mongos.getDBNames();
-for (var i = 0; i < dbs.length; i++) {
- assert.neq(dbs, "" + dbA);
-}
+ // Drop the non-suffixed db and ensure that it is the only one that was dropped.
+ dbA.dropDatabase();
+ var dbs = mongos.getDBNames();
+ for (var i = 0; i < dbs.length; i++) {
+ assert.neq(dbs, "" + dbA);
+ }
-assert.eq(0, config.databases.count({ _id: dbA.getName() }));
-assert.eq(1, config.databases.count({ _id: dbB.getName() }));
-assert.eq(1, config.databases.count({ _id: dbC.getName() }));
+ assert.eq(0, config.databases.count({_id: dbA.getName()}));
+ assert.eq(1, config.databases.count({_id: dbB.getName()}));
+ assert.eq(1, config.databases.count({_id: dbC.getName()}));
-// 10 dropped collections
-assert.eq(numColls, config.collections.count({ _id: RegExp("^" + dbA + "\\..*"), dropped: true }));
+ // 10 dropped collections
+ assert.eq(numColls,
+ config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
-// 20 active (dropped is missing)
-assert.eq(numColls, config.collections.count({ _id: RegExp("^" + dbB + "\\..*") }));
-assert.eq(numColls, config.collections.count({ _id: RegExp("^" + dbC + "\\..*") }));
+ // 20 active (dropped is missing)
+ assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));
+ assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbC + "\\..*")}));
-for (var i = 0; i < numColls; i++) {
- assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
- assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
-}
+ for (var i = 0; i < numColls; i++) {
+ assert.eq(numDocs / numColls, dbB.getCollection("data" + (i % numColls)).find().itcount());
+ assert.eq(numDocs / numColls, dbC.getCollection("data" + (i % numColls)).find().itcount());
+ }
-// Check that the unsharded collection has also been dropped.
-assert.eq(null, dbAOnShard.unsharded.findOne());
+    // Check that the unsharded collection has also been dropped.
+ assert.eq(null, dbAOnShard.unsharded.findOne());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/dump_coll_metadata.js b/jstests/sharding/dump_coll_metadata.js
index 7fe91e0a75c..eb60af37cb4 100644
--- a/jstests/sharding/dump_coll_metadata.js
+++ b/jstests/sharding/dump_coll_metadata.js
@@ -2,56 +2,56 @@
// Tests that we can dump collection metadata via getShardVersion()
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s0;
-var coll = mongos.getCollection( "foo.bar" );
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var shardAdmin = st.shard0.getDB( "admin" );
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var shardAdmin = st.shard0.getDB("admin");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-assert.commandWorked(shardAdmin.runCommand({ getShardVersion : coll + "" }));
+ assert.commandWorked(shardAdmin.runCommand({getShardVersion: coll + ""}));
-// Make sure we have chunks information on the shard after the shard collection call
-var result =
- assert.commandWorked(shardAdmin.runCommand({ getShardVersion : coll + "", fullMetadata : true }));
-printjson(result);
-var metadata = result.metadata;
+ // Make sure we have chunks information on the shard after the shard collection call
+ var result = assert.commandWorked(
+ shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
+ printjson(result);
+ var metadata = result.metadata;
-assert.eq( metadata.chunks.length, 1 );
-assert.eq( metadata.pending.length, 0 );
-assert( metadata.chunks[0][0]._id + "" == MinKey + "" );
-assert( metadata.chunks[0][1]._id + "" == MaxKey + "" );
-assert( metadata.shardVersion + "" == result.global + "" );
+ assert.eq(metadata.chunks.length, 1);
+ assert.eq(metadata.pending.length, 0);
+ assert(metadata.chunks[0][0]._id + "" == MinKey + "");
+ assert(metadata.chunks[0][1]._id + "" == MaxKey + "");
+ assert(metadata.shardVersion + "" == result.global + "");
-// Make sure a collection with no metadata still returns the metadata field
-assert( shardAdmin.runCommand({ getShardVersion : coll + "xyz", fullMetadata : true })
- .metadata != undefined );
+ // Make sure a collection with no metadata still returns the metadata field
+ assert(shardAdmin.runCommand({getShardVersion: coll + "xyz", fullMetadata: true}).metadata !=
+ undefined);
-// Make sure we get multiple chunks after a split
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+ // Make sure we get multiple chunks after a split
+ assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-assert( shardAdmin.runCommand({ getShardVersion : coll + "" }).ok );
-printjson( shardAdmin.runCommand({ getShardVersion : coll + "", fullMetadata : true }) );
+ assert(shardAdmin.runCommand({getShardVersion: coll + ""}).ok);
+ printjson(shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true}));
-// Make sure we have chunks info
-result = shardAdmin.runCommand({ getShardVersion : coll + "", fullMetadata : true });
-metadata = result.metadata;
+ // Make sure we have chunks info
+ result = shardAdmin.runCommand({getShardVersion: coll + "", fullMetadata: true});
+ metadata = result.metadata;
-assert.eq( metadata.chunks.length, 2 );
-assert.eq( metadata.pending.length, 0 );
-assert( metadata.chunks[0][0]._id + "" == MinKey + "" );
-assert( metadata.chunks[0][1]._id == 0 );
-assert( metadata.chunks[1][0]._id == 0 );
-assert( metadata.chunks[1][1]._id + "" == MaxKey + "" );
-assert( metadata.shardVersion + "" == result.global + "" );
+ assert.eq(metadata.chunks.length, 2);
+ assert.eq(metadata.pending.length, 0);
+ assert(metadata.chunks[0][0]._id + "" == MinKey + "");
+ assert(metadata.chunks[0][1]._id == 0);
+ assert(metadata.chunks[1][0]._id == 0);
+ assert(metadata.chunks[1][1]._id + "" == MaxKey + "");
+ assert(metadata.shardVersion + "" == result.global + "");
-st.stop();
+ st.stop();
})();
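For readers following the test above, a minimal sketch of the getShardVersion interface it exercises; this assumes `st` is an already-running ShardingTest and that "foo.bar" has been sharded on {_id: 1} as in the test.

    var shardAdmin = st.shard0.getDB("admin");
    var res = assert.commandWorked(
        shardAdmin.runCommand({getShardVersion: "foo.bar", fullMetadata: true}));
    // res.global is the shard version; res.metadata.chunks lists the [min, max] ranges this
    // shard owns, and res.metadata.pending lists chunks currently migrating in.
    printjson(res.metadata.chunks);
    assert(res.metadata.shardVersion + "" == res.global + "");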
diff --git a/jstests/sharding/empty_cluster_init.js b/jstests/sharding/empty_cluster_init.js
index e1251440c35..dfbc0604fe7 100644
--- a/jstests/sharding/empty_cluster_init.js
+++ b/jstests/sharding/empty_cluster_init.js
@@ -1,13 +1,11 @@
//
// Tests initialization of an empty cluster with multiple mongoses.
-// Starts a bunch of mongoses in parallel, and ensures that there's only a single config
+// Starts a bunch of mongoses in parallel, and ensures that there's only a single config
// version initialization.
//
-var configRS = new ReplSetTest({ name: "configRS", nodes: 3, useHostName: true });
-configRS.startSet({ configsvr: '',
- journal: "",
- storageEngine: 'wiredTiger' });
+var configRS = new ReplSetTest({name: "configRS", nodes: 3, useHostName: true});
+configRS.startSet({configsvr: '', journal: "", storageEngine: 'wiredTiger'});
var replConfig = configRS.getReplSetConfig();
replConfig.configsvr = true;
configRS.initiate(replConfig);
@@ -20,9 +18,8 @@ jsTest.log("Starting first set of mongoses in parallel...");
var mongoses = [];
for (var i = 0; i < 3; i++) {
- var mongos = MongoRunner.runMongos({ binVersion: "latest",
- configdb: configRS.getURL(),
- waitForConnect : false });
+ var mongos = MongoRunner.runMongos(
+ {binVersion: "latest", configdb: configRS.getURL(), waitForConnect: false});
mongoses.push(mongos);
}
@@ -33,13 +30,12 @@ assert.soon(function() {
try {
mongosConn = new Mongo(mongoses[0].host);
return true;
- }
- catch (e) {
+ } catch (e) {
print("Waiting for connect...");
printjson(e);
return false;
}
-}, "Mongos " + mongoses[0].host + " did not start.", 5 * 60 * 1000 );
+}, "Mongos " + mongoses[0].host + " did not start.", 5 * 60 * 1000);
var version = mongosConn.getCollection("config.version").findOne();
@@ -50,9 +46,8 @@ var version = mongosConn.getCollection("config.version").findOne();
jsTest.log("Starting second set of mongoses...");
for (var i = 0; i < 3; i++) {
- var mongos = MongoRunner.runMongos({ binVersion: "latest",
- configdb: configRS.getURL(),
- waitForConnect: false });
+ var mongos = MongoRunner.runMongos(
+ {binVersion: "latest", configdb: configRS.getURL(), waitForConnect: false});
mongoses.push(mongos);
}
@@ -61,8 +56,7 @@ assert.soon(function() {
try {
mongosConn = new Mongo(mongoses[mongoses.length - 1].host);
return true;
- }
- catch (e) {
+ } catch (e) {
print("Waiting for connect...");
printjson(e);
return false;
@@ -84,8 +78,7 @@ assert(version.clusterId);
assert.eq(undefined, version.excluding);
var oplog = configRS.getPrimary().getDB('local').oplog.rs;
-var updates = oplog.find({ ns: "config.version" }).toArray();
+var updates = oplog.find({ns: "config.version"}).toArray();
assert.eq(1, updates.length, 'ops to config.version: ' + tojson(updates));
configRS.stopSet(15);
-
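As a condensed sketch of the invariant this test checks (assuming `mongosConn` and `configRS` are the connections created above): however many mongoses race to initialize the cluster, config.version is written exactly once.

    var version = mongosConn.getCollection("config.version").findOne();
    assert(version.clusterId);  // a single cluster id was recorded
    var updates =
        configRS.getPrimary().getDB("local").oplog.rs.find({ns: "config.version"}).toArray();
    assert.eq(1, updates.length, 'ops to config.version: ' + tojson(updates));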
diff --git a/jstests/sharding/empty_doc_results.js b/jstests/sharding/empty_doc_results.js
index 6128bffd37b..be63f509532 100644
--- a/jstests/sharding/empty_doc_results.js
+++ b/jstests/sharding/empty_doc_results.js
@@ -2,25 +2,25 @@
// Verifies that mongos correctly handles empty documents when all fields are projected out
//
-var options = { mongosOptions : { binVersion : "" },
- shardOptions : { binVersion : "" } };
+var options = {
+ mongosOptions: {binVersion: ""},
+ shardOptions: {binVersion: ""}
+};
-var st = new ShardingTest({ shards : 2, other : options });
+var st = new ShardingTest({shards: 2, other: options});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
var admin = mongos.getDB("admin");
var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().getName() }));
-printjson(admin.runCommand({ movePrimary : coll.getDB().getName(), to : shards[0]._id }));
-assert.commandWorked(admin.runCommand({ shardCollection: coll.getFullName(),
- key: { _id : 1 } }));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({ split : coll.getFullName(), middle : { _id : 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : coll.getFullName(),
- find : { _id : 0 },
- to : shards[1]._id }));
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: shards[1]._id}));
st.printShardingStatus();
@@ -36,10 +36,10 @@ for (var i = -50; i < 50; i++) {
//
// Ensure projecting out all fields still returns the same number of documents
assert.eq(100, coll.find({}).itcount());
-assert.eq(100, coll.find({}).sort({ positiveId : 1 }).itcount());
-assert.eq(100, coll.find({}, { _id : 0, positiveId : 0 }).itcount());
+assert.eq(100, coll.find({}).sort({positiveId: 1}).itcount());
+assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());
// Can't remove sort key from projection (SERVER-11877) but some documents will still be empty
-assert.eq(100, coll.find({}, { _id : 0 }).sort({ positiveId : 1 }).itcount());
+assert.eq(100, coll.find({}, {_id: 0}).sort({positiveId: 1}).itcount());
//
//
@@ -50,8 +50,7 @@ var assertLast50Positive = function(sortedDocs) {
for (var i = 0; i < sortedDocs.length; ++i) {
if (sortedDocs[i].positiveId) {
positiveCount++;
- }
- else {
+ } else {
// Make sure only the last set of documents have "positiveId" set
assert.eq(positiveCount, 0);
}
@@ -59,8 +58,8 @@ var assertLast50Positive = function(sortedDocs) {
assert.eq(positiveCount, 50);
};
-assertLast50Positive(coll.find({}).sort({ positiveId : 1 }).toArray());
-assertLast50Positive(coll.find({}, { _id : 0 }).sort({ positiveId : 1 }).toArray());
+assertLast50Positive(coll.find({}).sort({positiveId: 1}).toArray());
+assertLast50Positive(coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray());
jsTest.log("DONE!");
st.stop(); \ No newline at end of file
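A small recap, as a sketch, of the projection behavior verified above (assuming `coll` is the sharded collection populated in the test, where only documents with non-negative _id have "positiveId" set): projecting out every field yields empty documents, but mongos still counts and merge-sorts them correctly.

    assert.eq(100, coll.find({}, {_id: 0, positiveId: 0}).itcount());  // all docs come back as {}
    var sorted = coll.find({}, {_id: 0}).sort({positiveId: 1}).toArray();
    assert.eq(100, sorted.length);  // the sort key may be absent from results, yet the merge works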
diff --git a/jstests/sharding/enable_sharding_basic.js b/jstests/sharding/enable_sharding_basic.js
index eb58df59d57..531a2efe57f 100644
--- a/jstests/sharding/enable_sharding_basic.js
+++ b/jstests/sharding/enable_sharding_basic.js
@@ -3,53 +3,51 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-// enableSharing can run only on mongos.
-assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({enableSharding : 'db'}),
- ErrorCodes.CommandNotFound);
+ // enableSharding can run only on mongos.
+ assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({enableSharding: 'db'}),
+ ErrorCodes.CommandNotFound);
-// enableSharing can run only against the admin database.
-assert.commandFailedWithCode(mongos.getDB('test').runCommand({enableSharding : 'db'}),
- ErrorCodes.Unauthorized);
+ // enableSharding can run only against the admin database.
+ assert.commandFailedWithCode(mongos.getDB('test').runCommand({enableSharding: 'db'}),
+ ErrorCodes.Unauthorized);
-// Can't shard 'config' database.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'config'}));
+ // Can't shard 'config' database.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'config'}));
-// Can't shard 'local' database.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'local'}));
+ // Can't shard 'local' database.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'local'}));
-// Can't shard 'admin' database.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'admin'}));
+ // Can't shard 'admin' database.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'admin'}));
-// Can't shard db with the name that just differ on case.
-assert.commandWorked(mongos.adminCommand({enableSharding : 'db'}));
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+ // Can't shard a db whose name differs only in case.
+ assert.commandWorked(mongos.adminCommand({enableSharding: 'db'}));
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
-assert.commandFailedWithCode(mongos.adminCommand({enableSharding : 'DB'}),
- ErrorCodes.DatabaseDifferCase);
+ assert.commandFailedWithCode(mongos.adminCommand({enableSharding: 'DB'}),
+ ErrorCodes.DatabaseDifferCase);
-// Can't shard invalid db name.
-assert.commandFailed(mongos.adminCommand({enableSharding : 'a.b'}));
-assert.commandFailed(mongos.adminCommand({enableSharding : ''}));
+ // Can't shard invalid db name.
+ assert.commandFailed(mongos.adminCommand({enableSharding: 'a.b'}));
+ assert.commandFailed(mongos.adminCommand({enableSharding: ''}));
-// Can't shard already sharded database.
-assert.commandFailedWithCode(mongos.adminCommand({enableSharding : 'db'}),
- ErrorCodes.AlreadyInitialized);
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
+ // Can't shard already sharded database.
+ assert.commandFailedWithCode(mongos.adminCommand({enableSharding: 'db'}),
+ ErrorCodes.AlreadyInitialized);
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'db'}).partitioned, true);
-// Verify config.databases metadata.
-assert.writeOK(mongos.getDB('unsharded').foo.insert({aKey: "aValue"}));
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
-assert.commandWorked(mongos.adminCommand({enableSharding : 'unsharded'}));
-assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
+ // Verify config.databases metadata.
+ assert.writeOK(mongos.getDB('unsharded').foo.insert({aKey: "aValue"}));
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, false);
+ assert.commandWorked(mongos.adminCommand({enableSharding: 'unsharded'}));
+ assert.eq(mongos.getDB('config').databases.findOne({_id: 'unsharded'}).partitioned, true);
-st.stop();
+ st.stop();
})();
-
-
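A brief sketch of the enableSharding contract the test verifies; `mongos` is assumed to be a connection to a running mongos and the database name 'app' is hypothetical.

    assert.commandWorked(mongos.adminCommand({enableSharding: 'app'}));
    assert.eq(true, mongos.getDB('config').databases.findOne({_id: 'app'}).partitioned);
    // Re-enabling is rejected, as is a name that differs only in case.
    assert.commandFailedWithCode(mongos.adminCommand({enableSharding: 'app'}),
                                 ErrorCodes.AlreadyInitialized);
    assert.commandFailedWithCode(mongos.adminCommand({enableSharding: 'APP'}),
                                 ErrorCodes.DatabaseDifferCase);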
diff --git a/jstests/sharding/error_propagation.js b/jstests/sharding/error_propagation.js
index 9948da66190..27336b5efb5 100644
--- a/jstests/sharding/error_propagation.js
+++ b/jstests/sharding/error_propagation.js
@@ -10,14 +10,14 @@
var db = st.getDB('test');
db.setSlaveOk(true);
- assert.writeOK(db.foo.insert({a:1}, {writeConcern: {w:3}}));
- assert.commandWorked(db.runCommand({aggregate: 'foo',
- pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]}));
+ assert.writeOK(db.foo.insert({a: 1}, {writeConcern: {w: 3}}));
+ assert.commandWorked(
+ db.runCommand({aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]}));
- assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w:3}}));
+ assert.writeOK(db.foo.insert({a: [1, 2]}, {writeConcern: {w: 3}}));
- var res = db.runCommand({aggregate: 'foo',
- pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]});
+ var res =
+ db.runCommand({aggregate: 'foo', pipeline: [{$project: {total: {'$add': ['$a', 1]}}}]});
assert.commandFailed(res);
assert.eq("$add only supports numeric or date types, not Array", res.errmsg, printjson(res));
}());
diff --git a/jstests/sharding/exact_shard_key_target.js b/jstests/sharding/exact_shard_key_target.js
index e21e6be95b1..885647ec96e 100644
--- a/jstests/sharding/exact_shard_key_target.js
+++ b/jstests/sharding/exact_shard_key_target.js
@@ -4,70 +4,72 @@
// SERVER-14138
//
-var st = new ShardingTest({ shards : 2, verbose : 4 });
+var st = new ShardingTest({shards: 2, verbose: 4});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
var admin = mongos.getDB("admin");
var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().getName() }));
-printjson(admin.runCommand({ movePrimary : coll.getDB().getName(), to : shards[0]._id }));
-assert.commandWorked(admin.runCommand({ shardCollection: coll.getFullName(),
- key: { "a.b": 1 } }));
-assert.commandWorked(admin.runCommand({ split: coll.getFullName(), middle: { "a.b": 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk: coll.getFullName(),
- find: { "a.b": 0 },
- to: shards[1]._id }));
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+printjson(admin.runCommand({movePrimary: coll.getDB().getName(), to: shards[0]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {"a.b": 1}}));
+assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {"a.b": 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {"a.b": 0}, to: shards[1]._id}));
st.printShardingStatus();
//
// JustOne remove
coll.remove({});
-assert.writeOK(coll.insert({ _id : 1, a : { b : -1 } }));
-assert.writeOK(coll.insert({ _id : 2, a : { b : 1 } }));
+assert.writeOK(coll.insert({_id: 1, a: {b: -1}}));
+assert.writeOK(coll.insert({_id: 2, a: {b: 1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 3, a : { b : 1 } }));
-assert.eq(1, coll.remove({ a : { b : 1 } }, { justOne : true }).nRemoved);
-assert.eq(2, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.eq(1, coll.remove({a: {b: 1}}, {justOne: true}).nRemoved);
+assert.eq(2,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
//
// Non-multi update
coll.remove({});
-assert.writeOK(coll.insert({ _id : 1, a : { b : 1 } }));
-assert.writeOK(coll.insert({ _id : 2, a : { b : -1 } }));
+assert.writeOK(coll.insert({_id: 1, a: {b: 1}}));
+assert.writeOK(coll.insert({_id: 2, a: {b: -1}}));
// Need orphaned data to see the impact
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id : 3, a : { b : 1 } }));
-assert.eq(1, coll.update({ a : { b : 1 } }, { $set : { updated : true } },
- { multi : false }).nMatched);
-assert.eq(1, st.shard0.getCollection(coll.toString()).count({ updated : true }) +
- st.shard1.getCollection(coll.toString()).count({ updated : true }) );
+assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 3, a: {b: 1}}));
+assert.eq(1, coll.update({a: {b: 1}}, {$set: {updated: true}}, {multi: false}).nMatched);
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count({updated: true}) +
+ st.shard1.getCollection(coll.toString()).count({updated: true}));
//
// Successive upserts (save()-style)
coll.remove({});
-assert.writeOK(coll.update({ _id : 1 }, { _id : 1, a : { b : 1 } }, { upsert : true }));
-assert.writeOK(coll.update({ _id : 1 }, { _id : 1, a : { b : 1 } }, { upsert : true }));
-assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(coll.update({_id: 1}, {_id: 1, a: {b: 1}}, {upsert: true}));
+assert.writeOK(coll.update({_id: 1}, {_id: 1, a: {b: 1}}, {upsert: true}));
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
//
// Successive upserts (replacement-style)
coll.remove({});
-assert.writeOK(coll.update({ a : { b : 1 } }, { a : { b : 1 } }, { upsert : true }));
-assert.writeOK(coll.update({ a : { b : 1 } }, { a : { b : 1 } }, { upsert : true }));
-assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.writeOK(coll.update({a: {b: 1}}, {a: {b: 1}}, {upsert: true}));
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
//
// Successive upserts ($op-style)
coll.remove({});
-assert.writeOK(coll.update({ a : { b : 1 } }, { $set : { upserted : true } }, { upsert : true }));
-assert.writeOK(coll.update({ a : { b : 1 } }, { $set : { upserted : true } }, { upsert : true }));
-assert.eq(1, st.shard0.getCollection(coll.toString()).count() +
- st.shard1.getCollection(coll.toString()).count() );
+assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.writeOK(coll.update({a: {b: 1}}, {$set: {upserted: true}}, {upsert: true}));
+assert.eq(1,
+ st.shard0.getCollection(coll.toString()).count() +
+ st.shard1.getCollection(coll.toString()).count());
jsTest.log("DONE!");
st.stop();
diff --git a/jstests/sharding/explain_cmd.js b/jstests/sharding/explain_cmd.js
index fa81dbf6b79..767e26c7eb2 100644
--- a/jstests/sharding/explain_cmd.js
+++ b/jstests/sharding/explain_cmd.js
@@ -23,10 +23,8 @@ db.adminCommand({shardCollection: collSharded.getFullName(), key: {a: 1}});
for (var i = 1; i <= 2; i++) {
assert.commandWorked(db.adminCommand({split: collSharded.getFullName(), middle: {a: i}}));
- var shardName = "shard000" + (i-1);
- printjson(db.adminCommand({moveChunk: collSharded.getFullName(),
- find: {a: i},
- to: shardName}));
+ var shardName = "shard000" + (i - 1);
+ printjson(db.adminCommand({moveChunk: collSharded.getFullName(), find: {a: i}, to: shardName}));
}
// Put data on each shard.
@@ -40,13 +38,8 @@ st.printShardingStatus();
assert.eq(3, collSharded.count({b: 1}));
// Explain the scatter-gather count.
-explain = db.runCommand({
- explain: {
- count: collSharded.getName(),
- query: {b: 1}
- },
- verbosity: "allPlansExecution"
-});
+explain = db.runCommand(
+ {explain: {count: collSharded.getName(), query: {b: 1}}, verbosity: "allPlansExecution"});
// Validate some basic properties of the result.
printjson(explain);
@@ -58,10 +51,7 @@ assert.eq(2, explain.executionStats.executionStages.shards.length);
// An explain of a command that doesn't exist should fail gracefully.
explain = db.runCommand({
- explain: {
- nonexistent: collSharded.getName(),
- query: {b: 1}
- },
+ explain: {nonexistent: collSharded.getName(), query: {b: 1}},
verbosity: "allPlansExecution"
});
printjson(explain);
@@ -86,8 +76,8 @@ explain = db.runCommand({
ns: collUnsharded.getName(),
key: "a",
cond: "b",
- $reduce: function (curr, result) { },
- initial: { }
+ $reduce: function(curr, result) {},
+ initial: {}
}
},
verbosity: "allPlansExecution"
@@ -109,8 +99,8 @@ explain = db.runCommand({
ns: collSharded.getName(),
key: "a",
cond: "b",
- $reduce: function (curr, result) { },
- initial: { }
+ $reduce: function(curr, result) {},
+ initial: {}
}
},
verbosity: "allPlansExecution"
@@ -122,12 +112,7 @@ assert.commandFailed(explain);
// Explain a delete operation and verify that it hits all shards without the shard key
explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [
- {q: {b: 1}, limit: 0}
- ]
- },
+ explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -140,12 +125,7 @@ assert.eq(3, collSharded.count({b: 1}));
// Explain a delete operation and verify that it hits only one shard with the shard key
explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [
- {q: {a: 1}, limit: 0}
- ]
- },
+ explain: {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 0}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -156,23 +136,15 @@ assert.eq(3, collSharded.count({b: 1}));
// Check that we fail gracefully if we try to do an explain of a write batch that has more
// than one operation in it.
explain = db.runCommand({
- explain: {
- delete: collSharded.getName(),
- deletes: [
- {q: {a: 1}, limit: 1},
- {q: {a: 2}, limit: 1}
- ]
- },
+ explain:
+ {delete: collSharded.getName(), deletes: [{q: {a: 1}, limit: 1}, {q: {a: 2}, limit: 1}]},
verbosity: "allPlansExecution"
});
assert.commandFailed(explain, tojson(explain));
// Explain a multi upsert operation and verify that it hits all shards
explain = db.runCommand({
- explain: {
- update: collSharded.getName(),
- updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]
- },
+ explain: {update: collSharded.getName(), updates: [{q: {}, u: {$set: {b: 10}}, multi: true}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -186,10 +158,7 @@ assert.eq(0, collSharded.count({b: 10}));
// Explain an upsert operation and verify that it hits only a single shard
explain = db.runCommand({
- explain: {
- update: collSharded.getName(),
- updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]
- },
+ explain: {update: collSharded.getName(), updates: [{q: {a: 10}, u: {a: 10}, upsert: true}]},
verbosity: "allPlansExecution"
});
assert.commandWorked(explain, tojson(explain));
@@ -199,11 +168,7 @@ assert.eq(0, collSharded.count({a: 10}));
// Explain an upsert operation which cannot be targeted, ensure an error is thrown
explain = db.runCommand({
- explain: {
- update: collSharded.getName(),
- updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]
- },
+ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]},
verbosity: "allPlansExecution"
});
assert.commandFailed(explain, tojson(explain));
-
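For reference, a minimal sketch of explaining a write command through mongos, assuming `db` and `collSharded` are set up as in the test (sharded on {a: 1} with three documents matching {b: 1}): a query that omits the shard key is scatter-gathered, and explain never applies the write.

    var explain = db.runCommand({
        explain: {delete: collSharded.getName(), deletes: [{q: {b: 1}, limit: 0}]},
        verbosity: "allPlansExecution"
    });
    assert.commandWorked(explain, tojson(explain));
    // explain is a dry run: the matching documents are still present afterwards.
    assert.eq(3, collSharded.count({b: 1}));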
diff --git a/jstests/sharding/explain_find_and_modify_sharded.js b/jstests/sharding/explain_find_and_modify_sharded.js
index 2833e2c6e03..7c1b10321c2 100644
--- a/jstests/sharding/explain_find_and_modify_sharded.js
+++ b/jstests/sharding/explain_find_and_modify_sharded.js
@@ -12,7 +12,9 @@
st.stopBalancer();
var testDB = st.s.getDB('test');
- var shardKey = {a: 1};
+ var shardKey = {
+ a: 1
+ };
// Create a collection with an index on the intended shard key.
var shardedColl = testDB.getCollection(collName);
@@ -25,33 +27,21 @@
assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()}));
var res = testDB.adminCommand({movePrimary: testDB.getName(), to: 'shard0000'});
assert(res.ok || res.errmsg == "it is already the primary");
- assert.commandWorked(testDB.adminCommand({
- shardCollection: shardedColl.getFullName(),
- key: shardKey
- }));
+ assert.commandWorked(
+ testDB.adminCommand({shardCollection: shardedColl.getFullName(), key: shardKey}));
// Split and move the chunks so that
// chunk { "a" : { "$minKey" : 1 } } -->> { "a" : 10 } is on shard0000
// chunk { "a" : 10 } -->> { "a" : { "$maxKey" : 1 } } is on shard0001
- assert.commandWorked(testDB.adminCommand({
- split: shardedColl.getFullName(),
- middle: {a: 10}
- }));
- assert.commandWorked(testDB.adminCommand({
- moveChunk: shardedColl.getFullName(),
- find: {a: 10},
- to: 'shard0001'
- }));
+ assert.commandWorked(testDB.adminCommand({split: shardedColl.getFullName(), middle: {a: 10}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: shardedColl.getFullName(), find: {a: 10}, to: 'shard0001'}));
var res;
// Queries that do not involve the shard key are invalid.
res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {b: 1},
- remove: true
- },
+ explain: {findAndModify: collName, query: {b: 1}, remove: true},
verbosity: 'queryPlanner'
});
assert.commandFailed(res);
@@ -82,12 +72,7 @@
// Test that the explain command is routed to "shard0000" when targeting the lower chunk range.
res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {a: 0},
- update: {$inc: {b: 7}},
- upsert: true
- },
+ explain: {findAndModify: collName, query: {a: 0}, update: {$inc: {b: 7}}, upsert: true},
verbosity: 'queryPlanner'
});
assert.commandWorked(res);
@@ -95,11 +80,7 @@
// Test that the explain command is routed to "shard0001" when targeting the higher chunk range.
res = testDB.runCommand({
- explain: {
- findAndModify: collName,
- query: {a: 20, c: 5},
- remove: true
- },
+ explain: {findAndModify: collName, query: {a: 20, c: 5}, remove: true},
verbosity: 'executionStats'
});
assert.commandWorked(res);
diff --git a/jstests/sharding/explain_read_pref.js b/jstests/sharding/explain_read_pref.js
index eab0a190ad4..cdf1d1e74a4 100644
--- a/jstests/sharding/explain_read_pref.js
+++ b/jstests/sharding/explain_read_pref.js
@@ -12,18 +12,16 @@ var assertCorrectTargeting = function(explain, isMongos, secExpected) {
var serverInfo;
if (isMongos) {
serverInfo = explain.queryPlanner.winningPlan.shards[0].serverInfo;
- }
- else {
+ } else {
serverInfo = explain.serverInfo;
}
var explainDestConn = new Mongo(serverInfo.host + ':' + serverInfo.port);
- var isMaster = explainDestConn.getDB('admin').runCommand({ isMaster: 1 });
+ var isMaster = explainDestConn.getDB('admin').runCommand({isMaster: 1});
if (secExpected) {
assert(isMaster.secondary);
- }
- else {
+ } else {
assert(isMaster.ismaster);
}
};
@@ -34,34 +32,34 @@ var testAllModes = function(conn, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [{}], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [{}], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag:'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
var testDB = conn.getDB('TestDB');
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSlaveOk(false); // purely rely on readPref
jsTest.log('Testing mode: ' + mode + ', tag sets: ' + tojson(tagSets));
// .explain().find()
@@ -85,11 +83,8 @@ var testAllModes = function(conn, isMongos) {
assertCorrectTargeting(explain, isMongos, secExpected);
// .explain().group()
- explain = testDB.user.explain().group({
- key: {_id: 1},
- reduce: function(curr, result) {},
- initial: {}
- });
+ explain = testDB.user.explain().group(
+ {key: {_id: 1}, reduce: function(curr, result) {}, initial: {}});
assertCorrectTargeting(explain, isMongos, secExpected);
} finally {
// Restore old read pref.
@@ -98,7 +93,7 @@ var testAllModes = function(conn, isMongos) {
});
};
-var st = new ShardingTest({ shards: { rs0: { nodes: 2 }}});
+var st = new ShardingTest({shards: {rs0: {nodes: 2}}});
st.stopBalancer();
ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
@@ -106,8 +101,14 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = { dc: 'ny', tag: 'one' };
-var SECONDARY_TAG = { dc: 'ny', tag: 'two' };
+var PRIMARY_TAG = {
+ dc: 'ny',
+ tag: 'one'
+};
+var SECONDARY_TAG = {
+ dc: 'ny',
+ tag: 'two'
+};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
@@ -124,9 +125,8 @@ rsConfig.version++;
jsTest.log('new rsconf ' + tojson(rsConfig));
try {
- primary.adminCommand({ replSetReconfig: rsConfig });
-}
-catch(e) {
+ primary.adminCommand({replSetReconfig: rsConfig});
+} catch (e) {
jsTest.log('replSetReconfig error: ' + e);
}
@@ -135,10 +135,9 @@ st.rs0.awaitSecondaryNodes();
// Force mongos to reconnect after our reconfig and also create the test database
assert.soon(function() {
try {
- st.s.getDB('TestDB').runCommand({ create: 'TestColl' });
+ st.s.getDB('TestDB').runCommand({create: 'TestColl'});
return true;
- }
- catch (x) {
+ } catch (x) {
// Intentionally caused an error that forces mongos's monitor to refresh.
jsTest.log('Caught exception while doing dummy command: ' + tojson(x));
return false;
@@ -154,8 +153,8 @@ jsTest.log('got rsconf ' + tojson(rsConfig));
var replConn = new Mongo(st.rs0.getURL());
// Make sure replica set connection is ready
-_awaitRSHostViaRSMonitor(primary.name, { ok: true, tags: PRIMARY_TAG }, st.rs0.name);
-_awaitRSHostViaRSMonitor(secondary.name, { ok: true, tags: SECONDARY_TAG }, st.rs0.name);
+_awaitRSHostViaRSMonitor(primary.name, {ok: true, tags: PRIMARY_TAG}, st.rs0.name);
+_awaitRSHostViaRSMonitor(secondary.name, {ok: true, tags: SECONDARY_TAG}, st.rs0.name);
testAllModes(replConn, false);
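A short sketch of the pattern this test loops over, assuming `conn` is one of the connections passed to testAllModes() and the secondary carries the {tag: 'two'} tag configured above; the host named in the explain output is what assertCorrectTargeting() inspects.

    var testDB = conn.getDB('TestDB');
    conn.setSlaveOk(false);                         // rely purely on the read preference
    conn.setReadPref('secondary', [{tag: 'two'}]);  // hypothetical mode/tag combination
    var explain = testDB.user.explain('queryPlanner').find().finish();
    // Through mongos the serving host is at queryPlanner.winningPlan.shards[0].serverInfo;
    // on a direct connection it is at explain.serverInfo.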
diff --git a/jstests/sharding/fair_balancer_round.js b/jstests/sharding/fair_balancer_round.js
index 8373d6fb0d6..90fc345c8cb 100644
--- a/jstests/sharding/fair_balancer_round.js
+++ b/jstests/sharding/fair_balancer_round.js
@@ -2,9 +2,11 @@
// Tests that a balancer round loads newly sharded collection data
//
-var options = {mongosOptions : {verbose : 1}};
+var options = {
+ mongosOptions: {verbose: 1}
+};
-var st = new ShardingTest({shards : 2, mongos : 2, other : options});
+var st = new ShardingTest({shards: 2, mongos: 2, other: options});
// Stop balancer initially
st.stopBalancer();
@@ -14,27 +16,26 @@ var staleMongos = st.s1;
var coll = mongos.getCollection("foo.bar");
// Shard collection through first mongos
-assert(mongos.adminCommand({enableSharding : coll.getDB() + ""}).ok);
+assert(mongos.adminCommand({enableSharding: coll.getDB() + ""}).ok);
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-assert(mongos.adminCommand({shardCollection : coll + "", key : {_id : 1}}).ok);
+assert(mongos.adminCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
// Create a bunch of chunks
var numSplits = 20;
-for ( var i = 0; i < numSplits; i++) {
- assert(mongos.adminCommand({split : coll + "", middle : {_id : i}}).ok);
+for (var i = 0; i < numSplits; i++) {
+ assert(mongos.adminCommand({split: coll + "", middle: {_id: i}}).ok);
}
// Stop the first mongos who setup the cluster.
st.stopMongos(0);
// Start balancer, which lets the stale mongos balance
-assert.writeOK(staleMongos.getDB("config").settings.update({_id: "balancer"},
- {$set: {stopped: false}},
- true));
+assert.writeOK(staleMongos.getDB("config")
+ .settings.update({_id: "balancer"}, {$set: {stopped: false}}, true));
// Make sure we eventually start moving chunks
assert.soon(function() {
- return staleMongos.getCollection("config.changelog").count({what : /moveChunk/}) > 0;
+ return staleMongos.getCollection("config.changelog").count({what: /moveChunk/}) > 0;
}, "no balance happened", 5 * 60 * 1000);
jsTest.log("DONE!");
diff --git a/jstests/sharding/features1.js b/jstests/sharding/features1.js
index 727f3cb2f09..dc547ddad74 100644
--- a/jstests/sharding/features1.js
+++ b/jstests/sharding/features1.js
@@ -1,204 +1,244 @@
(function() {
-var s = new ShardingTest({ name: "features1", shards: 2, mongos: 1 });
+ var s = new ShardingTest({name: "features1", shards: 2, mongos: 1});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-// ---- can't shard system namespaces ----
+ // ---- can't shard system namespaces ----
-assert( ! s.admin.runCommand( { shardcollection : "test.system.blah" , key : { num : 1 } } ).ok , "shard system namespace" );
+ assert(!s.admin.runCommand({shardcollection: "test.system.blah", key: {num: 1}}).ok,
+ "shard system namespace");
-// ---- setup test.foo -----
+ // ---- setup test.foo -----
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-a = s._connections[0].getDB( "test" );
-b = s._connections[1].getDB( "test" );
+ a = s._connections[0].getDB("test");
+ b = s._connections[1].getDB("test");
-db.foo.ensureIndex( { y : 1 } );
+ db.foo.ensureIndex({y: 1});
-s.adminCommand( { split : "test.foo" , middle : { num : 10 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 20 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name } );
+ s.adminCommand({split: "test.foo", middle: {num: 10}});
+ s.adminCommand(
+ {movechunk: "test.foo", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name});
-db.foo.save( { num : 5 } );
-db.foo.save( { num : 15 } );
+ db.foo.save({num: 5});
+ db.foo.save({num: 15});
-s.sync();
+ s.sync();
-// ---- make sure shard key index is everywhere ----
+ // ---- make sure shard key index is everywhere ----
-assert.eq( 3 , a.foo.getIndexKeys().length , "a index 1" );
-assert.eq( 3 , b.foo.getIndexKeys().length , "b index 1" );
+ assert.eq(3, a.foo.getIndexKeys().length, "a index 1");
+ assert.eq(3, b.foo.getIndexKeys().length, "b index 1");
-// ---- make sure if you add an index it goes everywhere ------
+ // ---- make sure if you add an index it goes everywhere ------
-db.foo.ensureIndex( { x : 1 } );
+ db.foo.ensureIndex({x: 1});
-s.sync();
+ s.sync();
-assert.eq( 4 , a.foo.getIndexKeys().length , "a index 2" );
-assert.eq( 4 , b.foo.getIndexKeys().length , "b index 2" );
+ assert.eq(4, a.foo.getIndexKeys().length, "a index 2");
+ assert.eq(4, b.foo.getIndexKeys().length, "b index 2");
-// ---- no unique indexes ------
+ // ---- no unique indexes ------
-db.foo.ensureIndex( { z : 1 } , true );
+ db.foo.ensureIndex({z: 1}, true);
-s.sync();
+ s.sync();
-assert.eq( 4 , a.foo.getIndexKeys().length , "a index 3" );
-assert.eq( 4 , b.foo.getIndexKeys().length , "b index 3" );
+ assert.eq(4, a.foo.getIndexKeys().length, "a index 3");
+ assert.eq(4, b.foo.getIndexKeys().length, "b index 3");
-db.foo.ensureIndex( { num : 1 , bar : 1 } , true );
-s.sync();
-assert.eq( 5 , b.foo.getIndexKeys().length , "c index 3" );
+ db.foo.ensureIndex({num: 1, bar: 1}, true);
+ s.sync();
+ assert.eq(5, b.foo.getIndexKeys().length, "c index 3");
-// ---- can't shard thing with unique indexes
+ // ---- can't shard thing with unique indexes
-db.foo2.ensureIndex( { a : 1 } );
-s.sync();
-printjson( db.foo2.getIndexes() );
-assert( s.admin.runCommand( { shardcollection : "test.foo2" , key : { num : 1 } } ).ok , "shard with index" );
+ db.foo2.ensureIndex({a: 1});
+ s.sync();
+ printjson(db.foo2.getIndexes());
+ assert(s.admin.runCommand({shardcollection: "test.foo2", key: {num: 1}}).ok,
+ "shard with index");
-db.foo3.ensureIndex( { a : 1 } , true );
-s.sync();
-printjson( db.foo3.getIndexes() );
-assert( ! s.admin.runCommand( { shardcollection : "test.foo3" , key : { num : 1 } } ).ok , "shard with unique index" );
+ db.foo3.ensureIndex({a: 1}, true);
+ s.sync();
+ printjson(db.foo3.getIndexes());
+ assert(!s.admin.runCommand({shardcollection: "test.foo3", key: {num: 1}}).ok,
+ "shard with unique index");
-db.foo7.ensureIndex( { num : 1 , a : 1 } , true );
-s.sync();
-printjson( db.foo7.getIndexes() );
-assert( s.admin.runCommand( { shardcollection : "test.foo7" , key : { num : 1 } } ).ok , "shard with ok unique index" );
+ db.foo7.ensureIndex({num: 1, a: 1}, true);
+ s.sync();
+ printjson(db.foo7.getIndexes());
+ assert(s.admin.runCommand({shardcollection: "test.foo7", key: {num: 1}}).ok,
+ "shard with ok unique index");
+ // ----- eval -----
-// ----- eval -----
+ db.foo2.save({num: 5, a: 7});
+ db.foo3.save({num: 5, a: 8});
-db.foo2.save( { num : 5 , a : 7 } );
-db.foo3.save( { num : 5 , a : 8 } );
+ assert.eq(1, db.foo3.count(), "eval pre1");
+ assert.eq(1, db.foo2.count(), "eval pre2");
-assert.eq( 1 , db.foo3.count() , "eval pre1" );
-assert.eq( 1 , db.foo2.count() , "eval pre2" );
+ assert.eq(8,
+ db.eval(function() {
+ return db.foo3.findOne().a;
+ }),
+ "eval 1 ");
+ assert.throws(function() {
+ db.eval(function() {
+ return db.foo2.findOne().a;
+ });
+ }, null, "eval 2");
-assert.eq( 8 , db.eval( function(){ return db.foo3.findOne().a; } ), "eval 1 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.findOne().a; } ); } , null , "eval 2" );
-
-assert.eq( 1 , db.eval( function(){ return db.foo3.count(); } ), "eval 3 " );
-assert.throws( function(){ db.eval( function(){ return db.foo2.count(); } ); } , null , "eval 4" );
-
-// ----- "eval" new command name SERVER-5588 -----
-var result;
-result = db.runCommand({eval: function () { return db.foo3.count(); } }); // non-sharded collection
-assert.eq(1, result.ok, "eval should work for non-sharded collection in cluster");
-
-result = db.runCommand({eval: function () { return db.foo2.count(); } }); // sharded collection
-assert.eq(0, result.ok, "eval should not work for sharded collection in cluster");
-
-
-// ---- unique shard key ----
-
-assert( s.admin.runCommand( { shardcollection : "test.foo4" , key : { num : 1 } , unique : true } ).ok , "shard with index and unique" );
-s.adminCommand( { split : "test.foo4" , middle : { num : 10 } } );
-
-s.admin.runCommand({ movechunk: "test.foo4", find: { num: 20 },
- to: s.getOther( s.getPrimaryShard( "test" ) ).name });
-
-assert.writeOK(db.foo4.save( { num : 5 } ));
-assert.writeOK(db.foo4.save( { num : 15 } ));
-s.sync();
-assert.eq( 1 , a.foo4.count() , "ua1" );
-assert.eq( 1 , b.foo4.count() , "ub1" );
-
-assert.eq( 2 , a.foo4.getIndexes().length , "ua2" );
-assert.eq( 2 , b.foo4.getIndexes().length , "ub2" );
-
-assert( a.foo4.getIndexes()[1].unique , "ua3" );
-assert( b.foo4.getIndexes()[1].unique , "ub3" );
-
-assert.eq( 2 , db.foo4.count() , "uc1" );
-db.foo4.save( { num : 7 } );
-assert.eq( 3 , db.foo4.count() , "uc2" );
-assert.writeError(db.foo4.save( { num : 7 } ));
-assert.eq( 3 , db.foo4.count() , "uc4" );
-
-// --- don't let you convertToCapped ----
-assert( ! db.foo4.isCapped() , "ca1" );
-assert( ! a.foo4.isCapped() , "ca2" );
-assert( ! b.foo4.isCapped() , "ca3" );
-assert( ! db.foo4.convertToCapped( 30000 ).ok , "ca30" );
-assert( ! db.foo4.isCapped() , "ca4" );
-assert( ! a.foo4.isCapped() , "ca5" );
-assert( ! b.foo4.isCapped() , "ca6" );
-
-// make sure i didn't break anything
-db.foo4a.save( { a : 1 } );
-assert( ! db.foo4a.isCapped() , "ca7" );
-db.foo4a.convertToCapped( 30000 );
-assert( db.foo4a.isCapped() , "ca8" );
-
-// --- don't let you shard a capped collection
-
-db.createCollection("foo5", {capped:true, size:30000});
-assert( db.foo5.isCapped() , "cb1" );
-var res = s.admin.runCommand( { shardcollection : "test.foo5" , key : { num : 1 } } );
-assert( !res.ok , "shard capped: " + tojson( res ) );
-
-
-// ----- group ----
-
-db.foo6.save( { a : 1 } );
-db.foo6.save( { a : 3 } );
-db.foo6.save( { a : 3 } );
-db.foo6.ensureIndex( { a : 1 } );
-s.sync();
-printjson( db.foo6.getIndexes() );
-
-assert.eq( 2 , db.foo6.group( { key : { a : 1 } , initial : { count : 0 } ,
- reduce : function(z,prev){ prev.count++; } } ).length );
-
-assert.eq( 3 , db.foo6.find().count() );
-assert( s.admin.runCommand( { shardcollection : "test.foo6" , key : { a : 1 } } ).ok );
-assert.eq( 3 , db.foo6.find().count() );
-
-s.adminCommand( { split : "test.foo6" , middle : { a : 2 } } );
-
-//movechunk commands are wrapped in assert.soon
-//Sometimes the TO-side shard isn't immediately ready, this
-//causes problems on slow hosts.
-//Remove when SERVER-10232 is fixed
-
-assert.soon( function() {
- var cmdRes = s.admin.runCommand( { movechunk : "test.foo6",
- find : { a : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name } );
- return cmdRes.ok;
-}, 'move chunk test.foo6', 60000, 1000 );
-
-assert.throws( function(){ db.foo6.group( { key : { a : 1 } , initial : { count : 0 } , reduce : function(z,prev){ prev.count++; } } ); } );
-
-
-// ---- can't shard non-empty collection without index -----
-
-assert.writeOK(db.foo8.save( { a : 1 } ));
-assert( ! s.admin.runCommand( { shardcollection : "test.foo8" , key : { a : 1 } } ).ok , "non-empty collection" );
-
-
-// ---- can't shard non-empty collection with null values in shard key ----
-
-assert.writeOK(db.foo9.save( { b : 1 } ));
-db.foo9.ensureIndex( { a : 1 } );
-assert( ! s.admin.runCommand( { shardcollection : "test.foo9" , key : { a : 1 } } ).ok , "entry with null value" );
-
-
-// --- listDatabases ---
-
-r = db.getMongo().getDBs();
-assert.eq(2, r.databases.length, tojson(r));
-assert.eq( "number", typeof(r.totalSize) , "listDatabases 2 : " + tojson( r ) );
-
-s.stop();
+ assert.eq(1,
+ db.eval(function() {
+ return db.foo3.count();
+ }),
+ "eval 3 ");
+ assert.throws(function() {
+ db.eval(function() {
+ return db.foo2.count();
+ });
+ }, null, "eval 4");
+
+ // ----- "eval" new command name SERVER-5588 -----
+ var result;
+ result = db.runCommand({
+ eval: function() {
+ return db.foo3.count();
+ }
+ }); // non-sharded collection
+ assert.eq(1, result.ok, "eval should work for non-sharded collection in cluster");
+
+ result = db.runCommand({
+ eval: function() {
+ return db.foo2.count();
+ }
+ }); // sharded collection
+ assert.eq(0, result.ok, "eval should not work for sharded collection in cluster");
+
+ // ---- unique shard key ----
+
+ assert(s.admin.runCommand({shardcollection: "test.foo4", key: {num: 1}, unique: true}).ok,
+ "shard with index and unique");
+ s.adminCommand({split: "test.foo4", middle: {num: 10}});
+
+ s.admin.runCommand(
+ {movechunk: "test.foo4", find: {num: 20}, to: s.getOther(s.getPrimaryShard("test")).name});
+
+ assert.writeOK(db.foo4.save({num: 5}));
+ assert.writeOK(db.foo4.save({num: 15}));
+ s.sync();
+ assert.eq(1, a.foo4.count(), "ua1");
+ assert.eq(1, b.foo4.count(), "ub1");
+
+ assert.eq(2, a.foo4.getIndexes().length, "ua2");
+ assert.eq(2, b.foo4.getIndexes().length, "ub2");
+
+ assert(a.foo4.getIndexes()[1].unique, "ua3");
+ assert(b.foo4.getIndexes()[1].unique, "ub3");
+
+ assert.eq(2, db.foo4.count(), "uc1");
+ db.foo4.save({num: 7});
+ assert.eq(3, db.foo4.count(), "uc2");
+ assert.writeError(db.foo4.save({num: 7}));
+ assert.eq(3, db.foo4.count(), "uc4");
+
+ // --- don't let you convertToCapped ----
+ assert(!db.foo4.isCapped(), "ca1");
+ assert(!a.foo4.isCapped(), "ca2");
+ assert(!b.foo4.isCapped(), "ca3");
+ assert(!db.foo4.convertToCapped(30000).ok, "ca30");
+ assert(!db.foo4.isCapped(), "ca4");
+ assert(!a.foo4.isCapped(), "ca5");
+ assert(!b.foo4.isCapped(), "ca6");
+
+ // make sure I didn't break anything
+ db.foo4a.save({a: 1});
+ assert(!db.foo4a.isCapped(), "ca7");
+ db.foo4a.convertToCapped(30000);
+ assert(db.foo4a.isCapped(), "ca8");
+
+ // --- don't let you shard a capped collection
+
+ db.createCollection("foo5", {capped: true, size: 30000});
+ assert(db.foo5.isCapped(), "cb1");
+ var res = s.admin.runCommand({shardcollection: "test.foo5", key: {num: 1}});
+ assert(!res.ok, "shard capped: " + tojson(res));
+
+ // ----- group ----
+
+ db.foo6.save({a: 1});
+ db.foo6.save({a: 3});
+ db.foo6.save({a: 3});
+ db.foo6.ensureIndex({a: 1});
+ s.sync();
+ printjson(db.foo6.getIndexes());
+
+ assert.eq(2,
+ db.foo6.group({
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(z, prev) {
+ prev.count++;
+ }
+ }).length);
+
+ assert.eq(3, db.foo6.find().count());
+ assert(s.admin.runCommand({shardcollection: "test.foo6", key: {a: 1}}).ok);
+ assert.eq(3, db.foo6.find().count());
+
+ s.adminCommand({split: "test.foo6", middle: {a: 2}});
+
+ // movechunk commands are wrapped in assert.soon
+ // Sometimes the TO-side shard isn't immediately ready, which
+ // causes problems on slow hosts.
+ // Remove when SERVER-10232 is fixed
+
+ assert.soon(function() {
+ var cmdRes = s.admin.runCommand({
+ movechunk: "test.foo6",
+ find: {a: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name
+ });
+ return cmdRes.ok;
+ }, 'move chunk test.foo6', 60000, 1000);
+
+ assert.throws(function() {
+ db.foo6.group({
+ key: {a: 1},
+ initial: {count: 0},
+ reduce: function(z, prev) {
+ prev.count++;
+ }
+ });
+ });
+
+ // ---- can't shard non-empty collection without index -----
+
+ assert.writeOK(db.foo8.save({a: 1}));
+ assert(!s.admin.runCommand({shardcollection: "test.foo8", key: {a: 1}}).ok,
+ "non-empty collection");
+
+ // ---- can't shard non-empty collection with null values in shard key ----
+
+ assert.writeOK(db.foo9.save({b: 1}));
+ db.foo9.ensureIndex({a: 1});
+ assert(!s.admin.runCommand({shardcollection: "test.foo9", key: {a: 1}}).ok,
+ "entry with null value");
+
+ // --- listDatabases ---
+
+ r = db.getMongo().getDBs();
+ assert.eq(2, r.databases.length, tojson(r));
+ assert.eq("number", typeof(r.totalSize), "listDatabases 2 : " + tojson(r));
+
+ s.stop();
})();
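One of the rules features1.js exercises deserves a compact restatement: a collection can only be sharded when every unique index is prefixed by the shard key. The sketch below assumes `s` and `db` come from a ShardingTest as above; the collection names are hypothetical.

    db.uniqOk.ensureIndex({num: 1, a: 1}, true);  // unique and prefixed by the shard key
    assert(s.admin.runCommand({shardcollection: "test.uniqOk", key: {num: 1}}).ok);

    db.uniqBad.ensureIndex({a: 1}, true);         // unique but does not start with the shard key
    assert(!s.admin.runCommand({shardcollection: "test.uniqBad", key: {num: 1}}).ok);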
diff --git a/jstests/sharding/features2.js b/jstests/sharding/features2.js
index f632e24e80c..010289ac1cc 100644
--- a/jstests/sharding/features2.js
+++ b/jstests/sharding/features2.js
@@ -1,202 +1,210 @@
(function() {
-var s = new ShardingTest({ name: "features2", shards: 2, mongos: 1 });
+ var s = new ShardingTest({name: "features2", shards: 2, mongos: 1});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-a = s._connections[0].getDB( "test" );
-b = s._connections[1].getDB( "test" );
+ a = s._connections[0].getDB("test");
+ b = s._connections[1].getDB("test");
-db = s.getDB( "test" );
+ db = s.getDB("test");
-// ---- distinct ----
+ // ---- distinct ----
-db.foo.save( { x : 1 } );
-db.foo.save( { x : 2 } );
-db.foo.save( { x : 3 } );
-db.foo.ensureIndex( { x : 1 } );
+ db.foo.save({x: 1});
+ db.foo.save({x: 2});
+ db.foo.save({x: 3});
+ db.foo.ensureIndex({x: 1});
-assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 1" );
-assert( a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3 , "distinct 2" );
-assert( a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0 , "distinct 3" );
+ assert.eq("1,2,3", db.foo.distinct("x"), "distinct 1");
+ assert(a.foo.distinct("x").length == 3 || b.foo.distinct("x").length == 3, "distinct 2");
+ assert(a.foo.distinct("x").length == 0 || b.foo.distinct("x").length == 0, "distinct 3");
-assert.eq( 1 , s.onNumShards( "foo" ) , "A1" );
+ assert.eq(1, s.onNumShards("foo"), "A1");
-s.shardColl( "foo" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
+ s.shardColl("foo", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
-assert.eq( 2 , s.onNumShards( "foo" ) , "A2" );
+ assert.eq(2, s.onNumShards("foo"), "A2");
-assert.eq( "1,2,3" , db.foo.distinct( "x" ) , "distinct 4" );
+ assert.eq("1,2,3", db.foo.distinct("x"), "distinct 4");
-// ----- delete ---
+ // ----- delete ---
-assert.eq( 3 , db.foo.count() , "D1" );
+ assert.eq(3, db.foo.count(), "D1");
-db.foo.remove( { x : 3 } );
-assert.eq( 2 , db.foo.count() , "D2" );
+ db.foo.remove({x: 3});
+ assert.eq(2, db.foo.count(), "D2");
-db.foo.save( { x : 3 } );
-assert.eq( 3 , db.foo.count() , "D3" );
+ db.foo.save({x: 3});
+ assert.eq(3, db.foo.count(), "D3");
-db.foo.remove( { x : { $gt : 2 } } );
-assert.eq( 2 , db.foo.count() , "D4" );
+ db.foo.remove({x: {$gt: 2}});
+ assert.eq(2, db.foo.count(), "D4");
-db.foo.remove( { x : { $gt : -1 } } );
-assert.eq( 0 , db.foo.count() , "D5" );
+ db.foo.remove({x: {$gt: -1}});
+ assert.eq(0, db.foo.count(), "D5");
-db.foo.save( { x : 1 } );
-db.foo.save( { x : 2 } );
-db.foo.save( { x : 3 } );
-assert.eq( 3 , db.foo.count() , "D6" );
-db.foo.remove( {} );
-assert.eq( 0 , db.foo.count() , "D7" );
+ db.foo.save({x: 1});
+ db.foo.save({x: 2});
+ db.foo.save({x: 3});
+ assert.eq(3, db.foo.count(), "D6");
+ db.foo.remove({});
+ assert.eq(0, db.foo.count(), "D7");
-// --- _id key ---
+ // --- _id key ---
-db.foo2.save( { _id : new ObjectId() } );
-db.foo2.save( { _id : new ObjectId() } );
-db.foo2.save( { _id : new ObjectId() } );
+ db.foo2.save({_id: new ObjectId()});
+ db.foo2.save({_id: new ObjectId()});
+ db.foo2.save({_id: new ObjectId()});
-assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );
+ assert.eq(1, s.onNumShards("foo2"), "F1");
-printjson( db.foo2.getIndexes() );
-s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } );
+ printjson(db.foo2.getIndexes());
+ s.adminCommand({shardcollection: "test.foo2", key: {_id: 1}});
-assert.eq( 3 , db.foo2.count() , "F2" );
-db.foo2.insert( {} );
-assert.eq( 4 , db.foo2.count() , "F3" );
+ assert.eq(3, db.foo2.count(), "F2");
+ db.foo2.insert({});
+ assert.eq(4, db.foo2.count(), "F3");
-// --- map/reduce
+ // --- map/reduce
-db.mr.save( { x : 1 , tags : [ "a" , "b" ] } );
-db.mr.save( { x : 2 , tags : [ "b" , "c" ] } );
-db.mr.save( { x : 3 , tags : [ "c" , "a" ] } );
-db.mr.save( { x : 4 , tags : [ "b" , "c" ] } );
-db.mr.ensureIndex( { x : 1 } );
+ db.mr.save({x: 1, tags: ["a", "b"]});
+ db.mr.save({x: 2, tags: ["b", "c"]});
+ db.mr.save({x: 3, tags: ["c", "a"]});
+ db.mr.save({x: 4, tags: ["b", "c"]});
+ db.mr.ensureIndex({x: 1});
-m = function(){
- this.tags.forEach(
- function(z){
- emit( z , { count : 1 } );
+ m = function() {
+ this.tags.forEach(function(z) {
+ emit(z, {count: 1});
+ });
+ };
+
+ r = function(key, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i].count;
}
- );
-};
+ return {
+ count: total
+ };
+ };
+
+ doMR = function(n) {
+ print(n);
+
+ // on-disk
+
+ var res = db.mr.mapReduce(m, r, "smr1_out");
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T0 " + n);
+
+ var x = db[res.result];
+ assert.eq(3, x.find().count(), "MR T1 " + n);
+
+ var z = {};
+ x.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ assert.eq(3, Object.keySet(z).length, "MR T2 " + n);
+ assert.eq(2, z.a, "MR T3 " + n);
+ assert.eq(3, z.b, "MR T4 " + n);
+ assert.eq(3, z.c, "MR T5 " + n);
+
+ x.drop();
+
+ // inline
+
+ var res = db.mr.mapReduce(m, r, {out: {inline: 1}});
+ printjson(res);
+ assert.eq(4, res.counts.input, "MR T6 " + n);
+
+ var z = {};
+ res.find().forEach(function(a) {
+ z[a._id] = a.value.count;
+ });
+ printjson(z);
+ assert.eq(3, Object.keySet(z).length, "MR T7 " + n);
+ assert.eq(2, z.a, "MR T8 " + n);
+ assert.eq(3, z.b, "MR T9 " + n);
+ assert.eq(3, z.c, "MR TA " + n);
+
+ };
+
+ doMR("before");
-r = function( key , values ){
- var total = 0;
- for ( var i=0; i<values.length; i++ ){
- total += values[i].count;
+ assert.eq(1, s.onNumShards("mr"), "E1");
+ s.shardColl("mr", {x: 1}, {x: 2}, {x: 3}, null, true /* waitForDelete */);
+ assert.eq(2, s.onNumShards("mr"), "E1");
+
+ doMR("after");
+
+ s.adminCommand({split: 'test.mr', middle: {x: 3}});
+ s.adminCommand({split: 'test.mr', middle: {x: 4}});
+ s.adminCommand({movechunk: 'test.mr', find: {x: 3}, to: s.getPrimaryShard('test').name});
+
+ doMR("after extra split");
+
+ cmd = {
+ mapreduce: "mr",
+ map: "emit( ",
+ reduce: "fooz + ",
+ out: "broken1"
+ };
+
+ x = db.runCommand(cmd);
+ y = s._connections[0].getDB("test").runCommand(cmd);
+
+ printjson(x);
+ printjson(y);
+
+ // count
+
+ db.countaa.save({"regex": /foo/i});
+ db.countaa.save({"regex": /foo/i});
+ db.countaa.save({"regex": /foo/i});
+ assert.eq(3, db.countaa.count(), "counta1");
+ assert.eq(3, db.countaa.find().itcount(), "counta1");
+
+ x = null;
+ y = null;
+ try {
+ x = db.runCommand("forceerror");
+ } catch (e) {
+ x = e;
}
- return { count : total };
-};
-
-doMR = function( n ){
- print(n);
-
- // on-disk
-
- var res = db.mr.mapReduce( m , r , "smr1_out" );
- printjson( res );
- assert.eq( 4 , res.counts.input , "MR T0 " + n );
-
- var x = db[res.result];
- assert.eq( 3 , x.find().count() , "MR T1 " + n );
-
- var z = {};
- x.find().forEach( function(a){ z[a._id] = a.value.count; } );
- assert.eq( 3 , Object.keySet( z ).length , "MR T2 " + n );
- assert.eq( 2 , z.a , "MR T3 " + n );
- assert.eq( 3 , z.b , "MR T4 " + n );
- assert.eq( 3 , z.c , "MR T5 " + n );
-
- x.drop();
-
- // inline
-
- var res = db.mr.mapReduce( m , r , { out : { inline : 1 } } );
- printjson( res );
- assert.eq( 4 , res.counts.input , "MR T6 " + n );
-
- var z = {};
- res.find().forEach( function(a){ z[a._id] = a.value.count; } );
- printjson( z );
- assert.eq( 3 , Object.keySet( z ).length , "MR T7 " + n ) ;
- assert.eq( 2 , z.a , "MR T8 " + n );
- assert.eq( 3 , z.b , "MR T9 " + n );
- assert.eq( 3 , z.c , "MR TA " + n );
-
-};
-
-doMR( "before" );
-
-assert.eq( 1 , s.onNumShards( "mr" ) , "E1" );
-s.shardColl( "mr" , { x : 1 } , { x : 2 } , { x : 3 }, null, true /* waitForDelete */ );
-assert.eq( 2 , s.onNumShards( "mr" ) , "E1" );
-
-doMR( "after" );
-
-s.adminCommand({split:'test.mr' , middle:{x:3}} );
-s.adminCommand({split:'test.mr' , middle:{x:4}} );
-s.adminCommand({movechunk:'test.mr', find:{x:3}, to: s.getPrimaryShard('test').name } );
-
-doMR( "after extra split" );
-
-cmd = { mapreduce : "mr" , map : "emit( " , reduce : "fooz + " , out : "broken1" };
-
-x = db.runCommand( cmd );
-y = s._connections[0].getDB( "test" ).runCommand( cmd );
-
-printjson( x );
-printjson( y );
-
-// count
-
-db.countaa.save({"regex" : /foo/i});
-db.countaa.save({"regex" : /foo/i});
-db.countaa.save({"regex" : /foo/i});
-assert.eq( 3 , db.countaa.count() , "counta1" );
-assert.eq( 3 , db.countaa.find().itcount() , "counta1" );
-
-x = null; y = null;
-try {
- x = db.runCommand( "forceerror" );
-}
-catch ( e ){
- x = e;
-}
-try {
- y = s._connections[0].getDB( "test" ).runCommand( "forceerror" );
-}
-catch ( e ){
- y = e;
-}
-
-// As the forceerror command is written, it doesnt set a code in the reply.
-// OP_COMMAND changes will add a code of 121 (CommandFailed) if a failing command
-// does not set one, so this comparison fails as "undefined" != 121.
-//
-// TODO: Uncomment this line when OP_COMMAND is implemented in mongos (SERVER-18292)
-// as then MongoS should set code 121 as well.
-//
-// assert.eq( x.code , y.code , "assert format" )
-assert.eq( x.errmsg , y.errmsg , "assert format" );
-assert.eq( x.ok , y.ok , "assert format" );
-
-// isMaster and query-wrapped-command
-isMaster = db.runCommand({isMaster:1});
-assert( isMaster.ismaster );
-assert.eq( 'isdbgrid', isMaster.msg );
-delete isMaster.localTime;
-
-im2 = db.runCommand({query: {isMaster:1}});
-delete im2.localTime;
-assert.eq( isMaster, im2 );
-
-im2 = db.runCommand({$query: {isMaster:1}});
-delete im2.localTime;
-assert.eq( isMaster, im2 );
-
-s.stop();
+ try {
+ y = s._connections[0].getDB("test").runCommand("forceerror");
+ } catch (e) {
+ y = e;
+ }
+
+    // As the forceerror command is written, it doesn't set a code in the reply.
+ // OP_COMMAND changes will add a code of 121 (CommandFailed) if a failing command
+ // does not set one, so this comparison fails as "undefined" != 121.
+ //
+ // TODO: Uncomment this line when OP_COMMAND is implemented in mongos (SERVER-18292)
+ // as then MongoS should set code 121 as well.
+ //
+ // assert.eq( x.code , y.code , "assert format" )
+ assert.eq(x.errmsg, y.errmsg, "assert format");
+ assert.eq(x.ok, y.ok, "assert format");
+
+ // isMaster and query-wrapped-command
+ isMaster = db.runCommand({isMaster: 1});
+ assert(isMaster.ismaster);
+ assert.eq('isdbgrid', isMaster.msg);
+ delete isMaster.localTime;
+
+ im2 = db.runCommand({query: {isMaster: 1}});
+ delete im2.localTime;
+ assert.eq(isMaster, im2);
+
+ im2 = db.runCommand({$query: {isMaster: 1}});
+ delete im2.localTime;
+ assert.eq(isMaster, im2);
+
+ s.stop();
})();
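The doMR() helper above relies on the usual mapReduce contract: reduce must accept the same value shape that map emits and return that same shape, so mongos can re-reduce partial results coming back from each shard. A minimal standalone sketch of such a pair; the emit key (this.x) is illustrative and not taken from the test, while the {count: N} value shape matches the reduce shown in the removed lines:

    var m = function() {
        emit(this.x, {count: 1});  // one partial count per document
    };
    var r = function(key, values) {
        var total = 0;
        for (var i = 0; i < values.length; i++) {
            total += values[i].count;  // sum partial counts; output shape matches emitted shape
        }
        return {count: total};
    };
    // inline output, as exercised by doMR() above
    printjson(db.mr.mapReduce(m, r, {out: {inline: 1}}));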
diff --git a/jstests/sharding/features3.js b/jstests/sharding/features3.js
index 03a5cedc25a..0697e875d58 100644
--- a/jstests/sharding/features3.js
+++ b/jstests/sharding/features3.js
@@ -6,149 +6,149 @@
// - Verifies a $where query can be killed on multiple DBs
// - Tests fsync and fsync+lock permissions on sharded db
(function() {
-'use strict';
-
-var s = new ShardingTest({shards: 2, mongos: 1 });
-var dbForTest = s.getDB("test");
-dbForTest.foo.drop();
-
-var numDocs = 10000;
-
-// shard test.foo and add a split point
-s.adminCommand({enablesharding: "test"});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection : "test.foo", key: {_id: 1}});
-s.adminCommand({split : "test.foo", middle: {_id: numDocs/2}});
-
-// move a chunk range to the non-primary shard
-s.adminCommand({moveChunk: "test.foo", find: {_id: 3},
- to: s.getNonPrimaries("test")[0], _waitForDelete: true});
-
-// restart balancer
-s.startBalancer();
-
-// insert 10k small documents into the sharded collection
-var bulk = dbForTest.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++) {
- bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
-
-var x = dbForTest.foo.stats();
-
-// verify the colleciton has been sharded and documents are evenly distributed
-assert.eq("test.foo", x.ns, "namespace mismatch");
-assert(x.sharded, "collection is not sharded");
-assert.eq(numDocs, x.count, "total count");
-assert.eq(numDocs / 2, x.shards.shard0000.count, "count on shard0000");
-assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001");
-assert(x.totalIndexSize > 0);
-
-// insert one doc into a non-sharded collection
-dbForTest.bar.insert({x: 1});
-var x = dbForTest.bar.stats();
-assert.eq(1, x.count, "XXX1");
-assert.eq("test.bar", x.ns, "XXX2");
-assert(!x.sharded, "XXX3: " + tojson(x));
-
-// fork shell and start querying the data
-var start = new Date();
-
-var whereKillSleepTime = 1000;
-var parallelCommand =
- "db.foo.find(function() { " +
- " sleep(" + whereKillSleepTime + "); " +
- " return false; " +
- "}).itcount(); ";
-
-// fork a parallel shell, but do not wait for it to start
-print("about to fork new shell at: " + Date());
-var awaitShell = startParallelShell(parallelCommand, s.s.port);
-print("done forking shell at: " + Date());
-
-// Get all current $where operations
-function getInProgWhereOps() {
- var inprog = dbForTest.currentOp().inprog;
-
- // Find all the where queries
- var myProcs = [];
- inprog.forEach(function(op) {
- if (op.query && op.query.filter && op.query.filter.$where) {
- myProcs.push(op);
- }
+ 'use strict';
+
+ var s = new ShardingTest({shards: 2, mongos: 1});
+ var dbForTest = s.getDB("test");
+ dbForTest.foo.drop();
+
+ var numDocs = 10000;
+
+ // shard test.foo and add a split point
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ s.adminCommand({split: "test.foo", middle: {_id: numDocs / 2}});
+
+ // move a chunk range to the non-primary shard
+ s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
});
- if (myProcs.length == 0) {
- print('No $where operations found: ' + tojson(inprog));
- }
- else {
- print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
- }
-
- return myProcs;
-}
+ // restart balancer
+ s.startBalancer();
-var curOpState = 0; // 0 = not found, 1 = killed
-var killTime = null;
-var mine;
+ // insert 10k small documents into the sharded collection
+ var bulk = dbForTest.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+
+ var x = dbForTest.foo.stats();
+
+    // verify the collection has been sharded and documents are evenly distributed
+ assert.eq("test.foo", x.ns, "namespace mismatch");
+ assert(x.sharded, "collection is not sharded");
+ assert.eq(numDocs, x.count, "total count");
+ assert.eq(numDocs / 2, x.shards.shard0000.count, "count on shard0000");
+ assert.eq(numDocs / 2, x.shards.shard0001.count, "count on shard0001");
+ assert(x.totalIndexSize > 0);
+
+ // insert one doc into a non-sharded collection
+ dbForTest.bar.insert({x: 1});
+ var x = dbForTest.bar.stats();
+ assert.eq(1, x.count, "XXX1");
+ assert.eq("test.bar", x.ns, "XXX2");
+ assert(!x.sharded, "XXX3: " + tojson(x));
+
+ // fork shell and start querying the data
+ var start = new Date();
+
+ var whereKillSleepTime = 1000;
+ var parallelCommand = "db.foo.find(function() { " + " sleep(" + whereKillSleepTime + "); " +
+ " return false; " + "}).itcount(); ";
+
+ // fork a parallel shell, but do not wait for it to start
+ print("about to fork new shell at: " + Date());
+ var awaitShell = startParallelShell(parallelCommand, s.s.port);
+ print("done forking shell at: " + Date());
+
+ // Get all current $where operations
+ function getInProgWhereOps() {
+ var inprog = dbForTest.currentOp().inprog;
+
+ // Find all the where queries
+ var myProcs = [];
+ inprog.forEach(function(op) {
+ if (op.query && op.query.filter && op.query.filter.$where) {
+ myProcs.push(op);
+ }
+ });
-assert.soon(function() {
- // Get all the current operations
- mine = getInProgWhereOps();
+ if (myProcs.length == 0) {
+ print('No $where operations found: ' + tojson(inprog));
+ } else {
+ print('Found ' + myProcs.length + ' $where operations: ' + tojson(myProcs));
+ }
- // Wait for the queries to start (one per shard, so 2 total)
- if (curOpState == 0 && mine.length == 2) {
- // queries started
- curOpState = 1;
- // kill all $where
- mine.forEach(function(z) {
- printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
- });
- killTime = new Date();
- }
- // Wait for killed queries to end
- else if (curOpState == 1 && mine.length == 0) {
- // Queries ended
- curOpState = 2;
- return true;
+ return myProcs;
}
-}, "Couldn't kill the $where operations.", 2 * 60 * 1000);
+ var curOpState = 0; // 0 = not found, 1 = killed
+ var killTime = null;
+ var mine;
+
+ assert.soon(function() {
+ // Get all the current operations
+ mine = getInProgWhereOps();
+
+ // Wait for the queries to start (one per shard, so 2 total)
+ if (curOpState == 0 && mine.length == 2) {
+ // queries started
+ curOpState = 1;
+ // kill all $where
+ mine.forEach(function(z) {
+ printjson(dbForTest.getSisterDB("admin").killOp(z.opid));
+ });
+ killTime = new Date();
+ }
+ // Wait for killed queries to end
+ else if (curOpState == 1 && mine.length == 0) {
+ // Queries ended
+ curOpState = 2;
+ return true;
+ }
+
+ }, "Couldn't kill the $where operations.", 2 * 60 * 1000);
-print("after loop: " + Date());
-assert(killTime, "timed out waiting too kill last mine:" + tojson(mine));
+ print("after loop: " + Date());
+    assert(killTime, "timed out waiting to kill; last mine: " + tojson(mine));
-assert.eq( 2 , curOpState , "failed killing" );
+ assert.eq(2, curOpState, "failed killing");
-killTime = new Date().getTime() - killTime.getTime();
-print("killTime: " + killTime);
-print("time if run full: " + (numDocs * whereKillSleepTime));
-assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
+ killTime = new Date().getTime() - killTime.getTime();
+ print("killTime: " + killTime);
+ print("time if run full: " + (numDocs * whereKillSleepTime));
+ assert.gt(whereKillSleepTime * numDocs / 20, killTime, "took too long to kill");
-// wait for the parallel shell we spawned to complete
-var exitCode = awaitShell({checkExitSuccess: false});
-assert.neq(0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
+ // wait for the parallel shell we spawned to complete
+ var exitCode = awaitShell({checkExitSuccess: false});
+ assert.neq(
+ 0, exitCode, "expected shell to exit abnormally due to JS execution being terminated");
-var end = new Date();
-print("elapsed: " + (end.getTime() - start.getTime()));
+ var end = new Date();
+ print("elapsed: " + (end.getTime() - start.getTime()));
-// test fsync command on non-admin db
-x = dbForTest.runCommand("fsync");
-assert(!x.ok , "fsync on non-admin namespace should fail : " + tojson(x));
-assert(x.code == 13,
- "fsync on non-admin succeeded, but should have failed: " + tojson(x));
+ // test fsync command on non-admin db
+ x = dbForTest.runCommand("fsync");
+ assert(!x.ok, "fsync on non-admin namespace should fail : " + tojson(x));
+ assert(x.code == 13, "fsync on non-admin succeeded, but should have failed: " + tojson(x));
-// test fsync on admin db
-x = dbForTest._adminCommand("fsync");
-assert(x.ok == 1, "fsync failed: " + tojson(x));
-if ( x.all.shard0000 > 0 ) {
- assert(x.numFiles > 0, "fsync failed: " + tojson(x));
-}
+ // test fsync on admin db
+ x = dbForTest._adminCommand("fsync");
+ assert(x.ok == 1, "fsync failed: " + tojson(x));
+ if (x.all.shard0000 > 0) {
+ assert(x.numFiles > 0, "fsync failed: " + tojson(x));
+ }
-// test fsync+lock on admin db
-x = dbForTest._adminCommand({"fsync" :1, lock:true});
-assert(!x.ok, "lock should fail: " + tojson(x));
+ // test fsync+lock on admin db
+ x = dbForTest._adminCommand({"fsync": 1, lock: true});
+ assert(!x.ok, "lock should fail: " + tojson(x));
-s.stop();
+ s.stop();
})();
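The kill loop above is a standard shell pattern for terminating long-running $where predicates; a compact sketch, assuming only a mongos connection db and that, as in this test, the predicate shows up under query.filter.$where in currentOp output:

    var whereOps = db.currentOp().inprog.filter(function(op) {
        return op.query && op.query.filter && op.query.filter.$where;
    });
    whereOps.forEach(function(op) {
        // killOp takes the opid reported by currentOp; the killed query then
        // surfaces as an error in the shell that issued it
        printjson(db.getSisterDB("admin").killOp(op.opid));
    });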
diff --git a/jstests/sharding/find_and_modify_after_multi_write.js b/jstests/sharding/find_and_modify_after_multi_write.js
index c8081ce9119..004fe8d8ead 100644
--- a/jstests/sharding/find_and_modify_after_multi_write.js
+++ b/jstests/sharding/find_and_modify_after_multi_write.js
@@ -1,93 +1,74 @@
(function() {
-"use strict";
-
-/**
- * Test that a targetted findAndModify will be properly routed after executing a write that
- * does not perform any shard version checks.
- */
-var runTest = function(writeFunc) {
- var st = new ShardingTest({ shards: 2, mongos: 2 });
-
- var testDB = st.s.getDB('test');
- testDB.dropDatabase();
-
- assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
- st.ensurePrimaryShard('test', 'shard0000');
-
- assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
- assert.commandWorked(testDB.adminCommand({ split: 'test.user', middle: { x: 0 }}));
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({ x: 123456 });
-
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Issue a targetted findAndModify and check that it was upserted to the right shard.
- assert.commandWorked(testDB2.runCommand({
- findAndModify: 'user',
- query: { x: 100 },
- update: { $set: { y: 1 }},
- upsert: true
- }));
-
- assert.neq(null, st.d0.getDB('test').user.findOne({ x: 100 }));
- assert.eq(null, st.d1.getDB('test').user.findOne({ x: 100 }));
-
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- assert.commandWorked(testDB2.runCommand({
- findAndModify: 'user',
- query: { x: 200 },
- update: { $set: { y: 1 }},
- upsert: true
- }));
-
- assert.eq(null, st.d0.getDB('test').user.findOne({ x: 200 }));
- assert.neq(null, st.d1.getDB('test').user.findOne({ x: 200 }));
-
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Ensure that write commands with multi version do not reset the connection shard version to
- // ignored.
- writeFunc(testDB2);
-
- assert.commandWorked(testDB2.runCommand({
- findAndModify: 'user',
- query: { x: 300 },
- update: { $set: { y: 1 }},
- upsert: true
- }));
-
- assert.neq(null, st.d0.getDB('test').user.findOne({ x: 300 }));
- assert.eq(null, st.d1.getDB('test').user.findOne({ x: 300 }));
-
- st.stop();
-};
-
-runTest(function(db) {
- db.user.update({}, { $inc: { y: 987654 }}, false, true);
-});
-
-runTest(function(db) {
- db.user.remove({ y: 'noMatch' }, false);
-});
+ "use strict";
+
+ /**
+     * Test that a targeted findAndModify will be properly routed after executing a write that
+ * does not perform any shard version checks.
+ */
+ var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
+
+ var testDB = st.s.getDB('test');
+ testDB.dropDatabase();
+
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
+
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+        // Issue a targeted findAndModify and check that it was upserted to the right shard.
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 100}, update: {$set: {y: 1}}, upsert: true}));
+
+ assert.neq(null, st.d0.getDB('test').user.findOne({x: 100}));
+ assert.eq(null, st.d1.getDB('test').user.findOne({x: 100}));
+
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
+
+ assert.eq(null, st.d0.getDB('test').user.findOne({x: 200}));
+ assert.neq(null, st.d1.getDB('test').user.findOne({x: 200}));
+
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+        // Ensure that write commands with multi version do not reset the connection shard
+        // version to ignored.
+ writeFunc(testDB2);
+
+ assert.commandWorked(testDB2.runCommand(
+ {findAndModify: 'user', query: {x: 300}, update: {$set: {y: 1}}, upsert: true}));
+
+ assert.neq(null, st.d0.getDB('test').user.findOne({x: 300}));
+ assert.eq(null, st.d1.getDB('test').user.findOne({x: 300}));
+
+ st.stop();
+ };
+
+ runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+ });
+
+ runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+ });
})();
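Condensed, the scenario this test repeats is: move a chunk through one mongos so the second mongos' cached collection version goes stale, then check that a findAndModify upsert issued through the stale mongos is still routed to the shard that now owns the key. A sketch using the test's own names (st, testDB, testDB2):

    // bump the collection version via mongos s0; s1 still holds its old cached version
    assert.commandWorked(testDB.adminCommand(
        {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
    // the upsert through the stale mongos must still reach the shard that now owns {x: 200}
    assert.commandWorked(testDB2.runCommand(
        {findAndModify: 'user', query: {x: 200}, update: {$set: {y: 1}}, upsert: true}));
    assert.neq(null, st.d1.getDB('test').user.findOne({x: 200}));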
diff --git a/jstests/sharding/find_getmore_cmd.js b/jstests/sharding/find_getmore_cmd.js
index f47201825ee..52fc6085507 100644
--- a/jstests/sharding/find_getmore_cmd.js
+++ b/jstests/sharding/find_getmore_cmd.js
@@ -28,11 +28,8 @@
st.ensurePrimaryShard(db.getName(), "shard0000");
db.adminCommand({shardCollection: coll.getFullName(), key: {_id: 1}});
assert.commandWorked(db.adminCommand({split: coll.getFullName(), middle: {_id: 0}}));
- assert.commandWorked(db.adminCommand({
- moveChunk: coll.getFullName(),
- find: {_id: 1},
- to: "shard0001"
- }));
+ assert.commandWorked(
+ db.adminCommand({moveChunk: coll.getFullName(), find: {_id: 1}, to: "shard0001"}));
// Find with no options.
cmdRes = db.runCommand({find: coll.getName()});
@@ -131,11 +128,8 @@
// User projection on $sortKey is illegal.
cmdRes = db.runCommand({find: coll.getName(), projection: {$sortKey: 1}, sort: {_id: 1}});
assert.commandFailed(cmdRes);
- cmdRes = db.runCommand({
- find: coll.getName(),
- projection: {$sortKey: {$meta: 'sortKey'}},
- sort: {_id: 1}
- });
+ cmdRes = db.runCommand(
+ {find: coll.getName(), projection: {$sortKey: {$meta: 'sortKey'}}, sort: {_id: 1}});
assert.commandFailed(cmdRes);
// User should be able to issue a sortKey meta-projection, as long as it's not on the reserved
diff --git a/jstests/sharding/findandmodify1.js b/jstests/sharding/findandmodify1.js
index 14b5786a379..08eb6602370 100644
--- a/jstests/sharding/findandmodify1.js
+++ b/jstests/sharding/findandmodify1.js
@@ -1,64 +1,66 @@
(function() {
-var s = new ShardingTest({ name: "find_and_modify_sharded", shards: 2 });
+ var s = new ShardingTest({name: "find_and_modify_sharded", shards: 2});
-s.adminCommand( { enablesharding : "test" } );
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+ s.adminCommand({enablesharding: "test"});
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
-numObjs = 20;
+ numObjs = 20;
-// Turn balancer off - with small numbers of chunks the balancer tries to correct all imbalances, not just < 8
-s.s.getDB( "config" ).settings.update({ _id : "balancer" }, { $set : { stopped : true } }, true );
+ // Turn balancer off - with small numbers of chunks the balancer tries to correct all
+ // imbalances, not just < 8
+ s.s.getDB("config").settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
-s.adminCommand( { shardcollection : "test.stuff" , key : {_id:1} } );
+ s.adminCommand({shardcollection: "test.stuff", key: {_id: 1}});
-// pre-split the collection so to avoid interference from balancer
-s.adminCommand( { split: "test.stuff" , middle : { _id : numObjs/2 } } );
-s.adminCommand( { movechunk : "test.stuff" , find : { _id : numObjs/2 } , to : secondary.getMongo().name } ) ;
+    // pre-split the collection to avoid interference from the balancer
+ s.adminCommand({split: "test.stuff", middle: {_id: numObjs / 2}});
+ s.adminCommand(
+ {movechunk: "test.stuff", find: {_id: numObjs / 2}, to: secondary.getMongo().name});
-var bulk = db.stuff.initializeUnorderedBulkOp();
-for (var i=0; i < numObjs; i++){
- bulk.insert({_id: i});
-}
-assert.writeOK(bulk.execute());
+ var bulk = db.stuff.initializeUnorderedBulkOp();
+ for (var i = 0; i < numObjs; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-// put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
-for (var i=2; i < numObjs; i+=2){
- if (i == numObjs/2)
- continue;
- s.adminCommand( { split: "test.stuff" , middle : {_id: i} } );
-}
+ // put two docs in each chunk (avoid the split in 0, since there are no docs less than 0)
+ for (var i = 2; i < numObjs; i += 2) {
+ if (i == numObjs / 2)
+ continue;
+ s.adminCommand({split: "test.stuff", middle: {_id: i}});
+ }
-s.printChunks();
-assert.eq( numObjs/2, s.config.chunks.count(), "split failed" );
-assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0000" }) );
-assert.eq( numObjs/4, s.config.chunks.count({ shard: "shard0001" }) );
+ s.printChunks();
+ assert.eq(numObjs / 2, s.config.chunks.count(), "split failed");
+ assert.eq(numObjs / 4, s.config.chunks.count({shard: "shard0000"}));
+ assert.eq(numObjs / 4, s.config.chunks.count({shard: "shard0001"}));
-// update
-for (var i=0; i < numObjs; i++){
- assert.eq(db.stuff.count({b:1}), i, "2 A");
+ // update
+ for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.stuff.count({b: 1}), i, "2 A");
- var out = db.stuff.findAndModify({query: {_id:i, b:null}, update: {$set: {b:1}}});
- assert.eq(out._id, i, "2 E");
+ var out = db.stuff.findAndModify({query: {_id: i, b: null}, update: {$set: {b: 1}}});
+ assert.eq(out._id, i, "2 E");
- assert.eq(db.stuff.count({b:1}), i+1, "2 B");
-}
+ assert.eq(db.stuff.count({b: 1}), i + 1, "2 B");
+ }
-// remove
-for (var i=0; i < numObjs; i++){
- assert.eq(db.stuff.count(), numObjs - i, "3 A");
- assert.eq(db.stuff.count({_id: i}), 1, "3 B");
+ // remove
+ for (var i = 0; i < numObjs; i++) {
+ assert.eq(db.stuff.count(), numObjs - i, "3 A");
+ assert.eq(db.stuff.count({_id: i}), 1, "3 B");
- var out = db.stuff.findAndModify({remove: true, query: {_id:i}});
+ var out = db.stuff.findAndModify({remove: true, query: {_id: i}});
- assert.eq(db.stuff.count(), numObjs - i - 1, "3 C");
- assert.eq(db.stuff.count({_id: i}), 0, "3 D");
- assert.eq(out._id, i, "3 E");
-}
+ assert.eq(db.stuff.count(), numObjs - i - 1, "3 C");
+ assert.eq(db.stuff.count({_id: i}), 0, "3 D");
+ assert.eq(out._id, i, "3 E");
+ }
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/findandmodify2.js b/jstests/sharding/findandmodify2.js
index 2d0b8c6a7fc..afa727e77b9 100644
--- a/jstests/sharding/findandmodify2.js
+++ b/jstests/sharding/findandmodify2.js
@@ -1,10 +1,11 @@
-var s = new ShardingTest({ name: "find_and_modify_sharded_2", shards: 2, mongos: 1, other: { chunkSize: 1 }});
-s.adminCommand( { enablesharding : "test" } );
+var s = new ShardingTest(
+ {name: "find_and_modify_sharded_2", shards: 2, mongos: 1, other: {chunkSize: 1}});
+s.adminCommand({enablesharding: "test"});
-var db = s.getDB( "test" );
+var db = s.getDB("test");
s.ensurePrimaryShard('test', 'shard0001');
-var primary = s.getPrimaryShard( "test" ).getDB( "test" );
-var secondary = s.getOther( primary ).getDB( "test" );
+var primary = s.getPrimaryShard("test").getDB("test");
+var secondary = s.getOther(primary).getDB("test");
var n = 100;
var collection = "stuff";
@@ -18,7 +19,7 @@ var col_fam_upsert = col_fam + '_upsert';
var big = "x";
print("---------- Creating large payload...");
-for(var i=0;i<15;i++) {
+for (var i = 0; i < 15; i++) {
big += big;
}
print("---------- Done.");
@@ -37,46 +38,39 @@ s.adminCommand({shardcollection: 'test.' + col_fam_upsert, key: {_id: 1}});
// update via findAndModify
function via_fam() {
- for (var i=0; i<n; i++){
- db[col_fam].save({ _id: i });
- }
-
- for (var i=0; i<n; i++){
- db[col_fam].findAndModify({query: {_id: i}, update: { $set:
- { big: big }
- }});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_fam].save({_id: i});
+ }
+
+ for (var i = 0; i < n; i++) {
+ db[col_fam].findAndModify({query: {_id: i}, update: {$set: {big: big}}});
+ }
}
// upsert via findAndModify
function via_fam_upsert() {
- for (var i=0; i<n; i++){
- db[col_fam_upsert].findAndModify({query: {_id: i}, update: { $set:
- { big: big }
- }, upsert: true});
- }
+ for (var i = 0; i < n; i++) {
+ db[col_fam_upsert].findAndModify(
+ {query: {_id: i}, update: {$set: {big: big}}, upsert: true});
+ }
}
// update data using basic update
function via_update() {
- for (var i=0; i<n; i++){
- db[col_update].save({ _id: i });
- }
-
- for (var i=0; i<n; i++){
- db[col_update].update({_id: i}, { $set:
- { big: big }
- });
- }
+ for (var i = 0; i < n; i++) {
+ db[col_update].save({_id: i});
+ }
+
+ for (var i = 0; i < n; i++) {
+ db[col_update].update({_id: i}, {$set: {big: big}});
+ }
}
// upsert data using basic update
function via_update_upsert() {
- for (var i=0; i<n; i++){
- db[col_update_upsert].update({_id: i}, { $set:
- { big: big }
- }, true);
- }
+ for (var i = 0; i < n; i++) {
+ db[col_update_upsert].update({_id: i}, {$set: {big: big}}, true);
+ }
}
print("---------- Update via findAndModify...");
@@ -98,14 +92,21 @@ print("---------- Done.");
print("---------- Printing chunks:");
s.printChunks();
-
print("---------- Verifying that both codepaths resulted in splits...");
-assert.gte( s.config.chunks.count({ "ns": "test." + col_fam }), minChunks, "findAndModify update code path didn't result in splits" );
-assert.gte( s.config.chunks.count({ "ns": "test." + col_fam_upsert }), minChunks, "findAndModify upsert code path didn't result in splits" );
-assert.gte( s.config.chunks.count({ "ns": "test." + col_update }), minChunks, "update code path didn't result in splits" );
-assert.gte( s.config.chunks.count({ "ns": "test." + col_update_upsert }), minChunks, "upsert code path didn't result in splits" );
-
-printjson( db[col_update].stats() );
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam}),
+ minChunks,
+ "findAndModify update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_fam_upsert}),
+ minChunks,
+ "findAndModify upsert code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update}),
+ minChunks,
+ "update code path didn't result in splits");
+assert.gte(s.config.chunks.count({"ns": "test." + col_update_upsert}),
+ minChunks,
+ "upsert code path didn't result in splits");
+
+printjson(db[col_update].stats());
// ensure that all chunks are smaller than chunksize
// make sure not teensy
diff --git a/jstests/sharding/forget_mr_temp_ns.js b/jstests/sharding/forget_mr_temp_ns.js
index d972aa7dbe1..fd950bcf43c 100644
--- a/jstests/sharding/forget_mr_temp_ns.js
+++ b/jstests/sharding/forget_mr_temp_ns.js
@@ -2,44 +2,47 @@
// Tests whether we forget M/R's temporary namespaces for sharded output
//
-var st = new ShardingTest({ shards : 1, mongos : 1 });
+var st = new ShardingTest({shards: 1, mongos: 1});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
-var outputColl = mongos.getCollection( (coll.getDB() + "") + ".mrOutput" );
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
+var outputColl = mongos.getCollection((coll.getDB() + "") + ".mrOutput");
var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < 10; i++ ) {
- bulk.insert({ _id : i, even : (i % 2 == 0) });
+for (var i = 0; i < 10; i++) {
+ bulk.insert({_id: i, even: (i % 2 == 0)});
}
assert.writeOK(bulk.execute());
-var map = function() { emit( this.even, 1 ); };
-var reduce = function( key, values ) { return Array.sum(values); };
+var map = function() {
+ emit(this.even, 1);
+};
+var reduce = function(key, values) {
+ return Array.sum(values);
+};
-out = coll.mapReduce( map, reduce, { out: { reduce : outputColl.getName(), sharded: true } } );
+out = coll.mapReduce(map, reduce, {out: {reduce: outputColl.getName(), sharded: true}});
-printjson( out );
-printjson( outputColl.find().toArray() );
+printjson(out);
+printjson(outputColl.find().toArray());
-var mongodThreadStats = st.shard0.getDB( "admin" ).runCommand({ shardConnPoolStats : 1 }).threads;
-var mongosThreadStats = admin.runCommand({ shardConnPoolStats : 1 }).threads;
+var mongodThreadStats = st.shard0.getDB("admin").runCommand({shardConnPoolStats: 1}).threads;
+var mongosThreadStats = admin.runCommand({shardConnPoolStats: 1}).threads;
-printjson( mongodThreadStats );
-printjson( mongosThreadStats );
+printjson(mongodThreadStats);
+printjson(mongosThreadStats);
-var checkForSeenNS = function( threadStats, regex ) {
- for ( var i = 0; i < threadStats.length; i++ ) {
+var checkForSeenNS = function(threadStats, regex) {
+ for (var i = 0; i < threadStats.length; i++) {
var seenNSes = threadStats[i].seenNS;
- for ( var j = 0; j < seenNSes.length; j++ ) {
- assert( !( regex.test( seenNSes ) ) );
+ for (var j = 0; j < seenNSes.length; j++) {
+ assert(!(regex.test(seenNSes)));
}
}
};
-checkForSeenNS( mongodThreadStats, /^foo.tmp/ );
-checkForSeenNS( mongosThreadStats, /^foo.tmp/ );
+checkForSeenNS(mongodThreadStats, /^foo.tmp/);
+checkForSeenNS(mongosThreadStats, /^foo.tmp/);
st.stop();
-
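The check above boils down to: run a mapReduce with sharded output, then verify that no temporary namespace is still being tracked by the connection pools. A compact restatement, assuming the same variables (coll, map, reduce, outputColl, admin) as in this test, whose temporary collections all begin with foo.tmp:

    var out = coll.mapReduce(map, reduce, {out: {reduce: outputColl.getName(), sharded: true}});
    printjson(out);
    // no thread in the shard connection pool should still remember a foo.tmp.* namespace
    var threads = admin.runCommand({shardConnPoolStats: 1}).threads;
    assert(threads.every(function(t) {
        return t.seenNS.every(function(ns) {
            return !/^foo\.tmp/.test(ns);
        });
    }));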
diff --git a/jstests/sharding/fts_score_sort_sharded.js b/jstests/sharding/fts_score_sort_sharded.js
index daab28ff81e..e6bf01503be 100644
--- a/jstests/sharding/fts_score_sort_sharded.js
+++ b/jstests/sharding/fts_score_sort_sharded.js
@@ -13,29 +13,27 @@ var cursor;
// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
//
assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
-admin.runCommand({movePrimary: coll.getDB().getName(),
- to: "shard0000"});
-assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(),
- key: {_id: 1}}));
+admin.runCommand({movePrimary: coll.getDB().getName(), to: "shard0000"});
+assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0001"}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: "shard0001"}));
//
// Insert documents into collection and create text index.
//
-assert.writeOK(coll.insert({ _id: 1, a: "pizza" }));
-assert.writeOK(coll.insert({ _id: -1, a: "pizza pizza" }));
-assert.writeOK(coll.insert({ _id: 2, a: "pizza pizza pizza" }));
-assert.writeOK(coll.insert({ _id: -2, a: "pizza pizza pizza pizza"}));
-assert.commandWorked(coll.ensureIndex({ a: "text" }));
+assert.writeOK(coll.insert({_id: 1, a: "pizza"}));
+assert.writeOK(coll.insert({_id: -1, a: "pizza pizza"}));
+assert.writeOK(coll.insert({_id: 2, a: "pizza pizza pizza"}));
+assert.writeOK(coll.insert({_id: -2, a: "pizza pizza pizza pizza"}));
+assert.commandWorked(coll.ensureIndex({a: "text"}));
//
// Execute query with sort on document score, verify results are in correct order.
//
-var results = coll.find({$text: {$search: "pizza"}},
- {s: {$meta: "textScore"}}).sort({s: {$meta: "textScore"}}).toArray();
+var results = coll.find({$text: {$search: "pizza"}}, {s: {$meta: "textScore"}})
+ .sort({s: {$meta: "textScore"}})
+ .toArray();
assert.eq(results.length, 4);
assert.eq(results[0]._id, -2);
assert.eq(results[1]._id, 2);
@@ -48,18 +46,26 @@ assert.eq(results[3]._id, 1);
// Projection not specified at all.
cursor = coll.find({$text: {$search: "pizza"}}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
// Projection specified with incorrect field name.
-cursor = coll.find({$text: {$search: "pizza"}},
- {t: {$meta: "textScore"}}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+cursor = coll.find({$text: {$search: "pizza"}}, {t: {$meta: "textScore"}})
+ .sort({s: {$meta: "textScore"}});
+assert.throws(function() {
+ cursor.next();
+});
// Projection specified on correct field but with wrong sort.
cursor = coll.find({$text: {$search: "pizza"}}, {s: 1}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
cursor = coll.find({$text: {$search: "pizza"}}, {s: -1}).sort({s: {$meta: "textScore"}});
-assert.throws(function() { cursor.next(); });
+assert.throws(function() {
+ cursor.next();
+});
// TODO Test sort on compound key.
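As a plain reference for the constraint this test exercises: when sorting by text score through mongos, the score must also be requested in the projection under the same field name, and with the $meta form rather than a plain 1/-1. A sketch with an illustrative collection and field name:

    // valid: score projected and sorted under the same field name
    var docs = db.articles.find({$text: {$search: "pizza"}}, {score: {$meta: "textScore"}})
                   .sort({score: {$meta: "textScore"}})
                   .toArray();
    // invalid on a sharded collection: meta sort requested without a matching meta projection
    // db.articles.find({$text: {$search: "pizza"}}).sort({score: {$meta: "textScore"}})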
diff --git a/jstests/sharding/geo_near_random1.js b/jstests/sharding/geo_near_random1.js
index bff63ca2b18..0229c84555c 100644
--- a/jstests/sharding/geo_near_random1.js
+++ b/jstests/sharding/geo_near_random1.js
@@ -1,46 +1,52 @@
// This tests all points using $near
(function() {
-load("jstests/libs/geo_near_random.js");
-
-var testName = "geo_near_random1";
-var s = new ShardingTest({ name: testName, shards: 3 });
-
-db = s.getDB("test"); // global db
-
-var test = new GeoNearRandomTest(testName);
-
-s.adminCommand({enablesharding:'test'});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
-
-test.insertPts(50);
-
-for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
- s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
- try {
- s.adminCommand({ moveChunk: ('test.' + testName), find: { _id: i-1 },
- to: ('shard000' + (i%3)), _waitForDelete: true });
- } catch (e) {
- // ignore this error
- if (! e.message.match(/that chunk is already on that shard/)){
- throw e;
+ load("jstests/libs/geo_near_random.js");
+
+ var testName = "geo_near_random1";
+ var s = new ShardingTest({name: testName, shards: 3});
+
+ db = s.getDB("test"); // global db
+
+ var test = new GeoNearRandomTest(testName);
+
+ s.adminCommand({enablesharding: 'test'});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}});
+
+ test.insertPts(50);
+
+ for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i}});
+ try {
+ s.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: ('shard000' + (i % 3)),
+ _waitForDelete: true
+ });
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
+ }
}
}
-}
-// Turn balancer back on, for actual tests
-// s.startBalancer() // SERVER-13365
+ // Turn balancer back on, for actual tests
+ // s.startBalancer() // SERVER-13365
-printShardingSizes();
+ printShardingSizes();
-var opts = {sharded: true};
-test.testPt([0,0], opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
+ var opts = {
+ sharded: true
+ };
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
-s.stop();
+ s.stop();
})();
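After a pre-split loop like the one above, the resulting chunk distribution can be checked directly against the config database; a small sketch, with the namespace taken from the test and the final assertion purely illustrative:

    var ns = 'test.' + testName;
    var perShard = {};
    s.config.chunks.find({ns: ns}).forEach(function(chunk) {
        perShard[chunk.shard] = (perShard[chunk.shard] || 0) + 1;
    });
    printjson(perShard);  // e.g. {shard0000: 3, shard0001: 4, shard0002: 3}
    assert.gte(Object.keySet(perShard).length, 2, 'expected chunks on more than one shard');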
diff --git a/jstests/sharding/geo_near_random2.js b/jstests/sharding/geo_near_random2.js
index cc566607113..cdf8543274a 100644
--- a/jstests/sharding/geo_near_random2.js
+++ b/jstests/sharding/geo_near_random2.js
@@ -1,50 +1,59 @@
// This tests 1% of all points using $near and $nearSphere
(function() {
-load("jstests/libs/geo_near_random.js");
-
-var testName = "geo_near_random2";
-var s = new ShardingTest({ name: testName, shards: 3 });
-
-db = s.getDB("test"); // global db
-
-var test = new GeoNearRandomTest(testName);
-
-s.adminCommand({enablesharding:'test'});
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand({shardcollection: ('test.' + testName), key: {_id:1} });
-
-test.insertPts(5000);
-
-for (var i = (test.nPts/10); i < test.nPts; i+= (test.nPts/10)){
- s.adminCommand({split: ('test.' + testName), middle: {_id: i} });
- try {
- s.adminCommand({moveChunk: ('test.' + testName), find: {_id: i-1}, to: ('shard000' + (i%3)), _waitForDelete : true });
- } catch (e) {
- // ignore this error
- if (! e.message.match(/that chunk is already on that shard/)){
- throw e;
+ load("jstests/libs/geo_near_random.js");
+
+ var testName = "geo_near_random2";
+ var s = new ShardingTest({name: testName, shards: 3});
+
+ db = s.getDB("test"); // global db
+
+ var test = new GeoNearRandomTest(testName);
+
+ s.adminCommand({enablesharding: 'test'});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: ('test.' + testName), key: {_id: 1}});
+
+ test.insertPts(5000);
+
+ for (var i = (test.nPts / 10); i < test.nPts; i += (test.nPts / 10)) {
+ s.adminCommand({split: ('test.' + testName), middle: {_id: i}});
+ try {
+ s.adminCommand({
+ moveChunk: ('test.' + testName),
+ find: {_id: i - 1},
+ to: ('shard000' + (i % 3)),
+ _waitForDelete: true
+ });
+ } catch (e) {
+ // ignore this error
+ if (!e.message.match(/that chunk is already on that shard/)) {
+ throw e;
+ }
}
}
-}
-
-//Turn balancer back on, for actual tests
-// s.startBalancer(); // SERVER-13365
-
-opts = {sphere:0, nToTest:test.nPts*0.01, sharded:true};
-test.testPt([0,0], opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-test.testPt(test.mkPt(), opts);
-
-opts.sphere = 1;
-test.testPt([0,0], opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-test.testPt(test.mkPt(0.8), opts);
-
-s.stop();
+
+ // Turn balancer back on, for actual tests
+ // s.startBalancer(); // SERVER-13365
+
+ opts = {
+ sphere: 0,
+ nToTest: test.nPts * 0.01,
+ sharded: true
+ };
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+ test.testPt(test.mkPt(), opts);
+
+ opts.sphere = 1;
+ test.testPt([0, 0], opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+ test.testPt(test.mkPt(0.8), opts);
+
+ s.stop();
})();
diff --git a/jstests/sharding/geo_shardedgeonear.js b/jstests/sharding/geo_shardedgeonear.js
index d00c6a057a3..54bda17cf16 100644
--- a/jstests/sharding/geo_shardedgeonear.js
+++ b/jstests/sharding/geo_shardedgeonear.js
@@ -9,38 +9,48 @@ function test(db, sharded, indexType) {
if (sharded) {
var shards = [];
var config = shardedDB.getSiblingDB("config");
- config.shards.find().forEach(function(shard) { shards.push(shard._id); });
+ config.shards.find().forEach(function(shard) {
+ shards.push(shard._id);
+ });
- shardedDB.adminCommand({shardCollection: shardedDB[coll].getFullName(), key: {rand:1}});
- for (var i=1; i < 10; i++) {
+ shardedDB.adminCommand({shardCollection: shardedDB[coll].getFullName(), key: {rand: 1}});
+ for (var i = 1; i < 10; i++) {
// split at 0.1, 0.2, ... 0.9
- shardedDB.adminCommand({split: shardedDB[coll].getFullName(), middle: {rand: i/10}});
- shardedDB.adminCommand({moveChunk: shardedDB[coll].getFullName(), find: {rand: i/10},
- to: shards[i%shards.length]});
+ shardedDB.adminCommand({split: shardedDB[coll].getFullName(), middle: {rand: i / 10}});
+ shardedDB.adminCommand({
+ moveChunk: shardedDB[coll].getFullName(),
+ find: {rand: i / 10},
+ to: shards[i % shards.length]
+ });
}
assert.eq(config.chunks.count({'ns': shardedDB[coll].getFullName()}), 10);
}
Random.setRandomSeed();
- var numPts = 10*1000;
- for (var i=0; i < numPts; i++) {
+ var numPts = 10 * 1000;
+ for (var i = 0; i < numPts; i++) {
var lat = 90 - Random.rand() * 180;
var lng = 180 - Random.rand() * 360;
- assert.writeOK(db[coll].insert({rand:Math.random(), loc: [lng, lat]}));
+ assert.writeOK(db[coll].insert({rand: Math.random(), loc: [lng, lat]}));
}
assert.eq(db[coll].count(), numPts);
- assert.commandWorked(db[coll].ensureIndex({ loc: indexType }));
+ assert.commandWorked(db[coll].ensureIndex({loc: indexType}));
- var queryPoint = [0,0];
- geoCmd = {geoNear: coll, near: queryPoint, spherical: true, includeLocs: true};
+ var queryPoint = [0, 0];
+ geoCmd = {
+ geoNear: coll,
+ near: queryPoint,
+ spherical: true,
+ includeLocs: true
+ };
assert.commandWorked(db.runCommand(geoCmd), tojson({sharded: sharded, indexType: indexType}));
}
-var sharded = new ShardingTest({ shards: 3, mongos: 1 });
+var sharded = new ShardingTest({shards: 3, mongos: 1});
sharded.stopBalancer();
-sharded.adminCommand( { enablesharding : "test" } );
+sharded.adminCommand({enablesharding: "test"});
var shardedDB = sharded.getDB('test');
sharded.ensurePrimaryShard('test', 'shard0001');
printjson(shardedDB);
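For reference, the core of test() above is a single geoNear command against the (possibly sharded) collection; stripped of the sharding setup, and assuming a 2dsphere index and an illustrative collection name, it is roughly:

    assert.commandWorked(db.points.ensureIndex({loc: "2dsphere"}));
    assert.writeOK(db.points.insert({rand: Math.random(), loc: [45, 45]}));
    var res = db.runCommand(
        {geoNear: 'points', near: [0, 0], spherical: true, includeLocs: true});
    assert.commandWorked(res);
    printjson(res.results.length);  // documents come back ordered by distance from [0, 0]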
diff --git a/jstests/sharding/group_slaveok.js b/jstests/sharding/group_slaveok.js
index 0c7175f8fed..0d9221ed896 100644
--- a/jstests/sharding/group_slaveok.js
+++ b/jstests/sharding/group_slaveok.js
@@ -1,63 +1,68 @@
// Tests group using slaveOk
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ name: "groupSlaveOk",
- shards: 1,
- mongos: 1,
- other: { rs: true,
- rs0: { nodes: 2 } } });
+ var st = new ShardingTest(
+ {name: "groupSlaveOk", shards: 1, mongos: 1, other: {rs: true, rs0: {nodes: 2}}});
-var rst = st._rs[0].test;
+ var rst = st._rs[0].test;
-// Insert data into replica set
-var conn = new Mongo(st.s.host);
-conn.setLogLevel(3);
+ // Insert data into replica set
+ var conn = new Mongo(st.s.host);
+ conn.setLogLevel(3);
-var coll = conn.getCollection("test.groupSlaveOk");
-coll.drop();
+ var coll = conn.getCollection("test.groupSlaveOk");
+ coll.drop();
-var bulk = coll.initializeUnorderedBulkOp();
-for(var i = 0; i < 300; i++) {
- bulk.insert({ i: i % 10 });
-}
-assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10});
+ }
+ assert.writeOK(bulk.execute());
-// Wait for client to update itself and replication to finish
-rst.awaitReplication();
+ // Wait for client to update itself and replication to finish
+ rst.awaitReplication();
-var primary = rst.getPrimary();
-var sec = rst.getSecondary();
+ var primary = rst.getPrimary();
+ var sec = rst.getSecondary();
-// Data now inserted... stop the master, since only two in set, other will still be secondary
-rst.stop(rst.getPrimary());
-printjson(rst.status());
+    // Data now inserted... stop the master; with only two nodes in the set, the other stays secondary
+ rst.stop(rst.getPrimary());
+ printjson(rst.status());
-// Wait for the mongos to recognize the slave
-ReplSetTest.awaitRSClientHosts(conn, sec, { ok: true, secondary: true });
+ // Wait for the mongos to recognize the slave
+ ReplSetTest.awaitRSClientHosts(conn, sec, {ok: true, secondary: true});
-// Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
-// master is down
-conn.setSlaveOk();
+ // Need to check slaveOk=true first, since slaveOk=false will destroy conn in pool when
+ // master is down
+ conn.setSlaveOk();
-// Should not throw exception, since slaveOk'd
-assert.eq(10, coll.group({ key: { i: true } ,
- reduce: function(obj, ctx) { ctx.count += 1; },
- initial: { count: 0 } }).length);
+ // Should not throw exception, since slaveOk'd
+ assert.eq(10,
+ coll.group({
+ key: {i: true},
+ reduce: function(obj, ctx) {
+ ctx.count += 1;
+ },
+ initial: {count: 0}
+ }).length);
-try {
- conn.setSlaveOk(false);
- var res = coll.group({ key: { i: true },
- reduce: function(obj, ctx) { ctx.count += 1; },
- initial: { count: 0 } });
+ try {
+ conn.setSlaveOk(false);
+ var res = coll.group({
+ key: {i: true},
+ reduce: function(obj, ctx) {
+ ctx.count += 1;
+ },
+ initial: {count: 0}
+ });
- print("Should not reach here! Group result: " + tojson(res));
- assert(false);
-}
-catch(e){
- print("Non-slaveOk'd connection failed." + tojson(e));
-}
+ print("Should not reach here! Group result: " + tojson(res));
+ assert(false);
+ } catch (e) {
+ print("Non-slaveOk'd connection failed." + tojson(e));
+ }
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/hash_basic.js b/jstests/sharding/hash_basic.js
index d7599488695..e4bf6ded27b 100644
--- a/jstests/sharding/hash_basic.js
+++ b/jstests/sharding/hash_basic.js
@@ -1,25 +1,27 @@
-var st = new ShardingTest({ shards: 2, chunkSize: 1 });
+var st = new ShardingTest({shards: 2, chunkSize: 1});
var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
+testDB.adminCommand({enableSharding: 'test'});
st.ensurePrimaryShard('test', 'shard0001');
-testDB.adminCommand({ shardCollection: 'test.user', key: { x: 'hashed' }});
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 'hashed'}});
var configDB = st.s.getDB('config');
var chunkCountBefore = configDB.chunks.count();
assert.gt(chunkCountBefore, 1);
for (var x = 0; x < 1000; x++) {
- testDB.user.insert({ x: x });
+ testDB.user.insert({x: x});
}
// For debugging
(function() {
- var chunkList = configDB.chunks.find().sort({ min: -1 }).toArray();
- chunkList.forEach(function(chunk) { chunk.count = 0; });
+ var chunkList = configDB.chunks.find().sort({min: -1}).toArray();
+ chunkList.forEach(function(chunk) {
+ chunk.count = 0;
+ });
for (var x = 0; x < 1000; x++) {
- var hashVal = testDB.adminCommand({ _hashBSONElement: x }).out;
+ var hashVal = testDB.adminCommand({_hashBSONElement: x}).out;
var countSet = false;
for (var y = 0; y < chunkList.length - 2; y++) {
@@ -28,8 +30,7 @@ for (var x = 0; x < 1000; x++) {
countSet = true;
chunkDoc.count++;
- print('doc in chunk: x [' + x + '], h[' + hashVal +
- '], min[' + chunkDoc.min.x +
+ print('doc in chunk: x [' + x + '], h[' + hashVal + '], min[' + chunkDoc.min.x +
'], max[' + chunkDoc.max.x + ']');
break;
}
@@ -37,8 +38,7 @@ for (var x = 0; x < 1000; x++) {
if (!countSet) {
chunkDoc = chunkList[chunkList.length - 1];
- print('doc in chunk: x [' + x + '], h[' + hashVal +
- '], min[' + chunkDoc.min.x +
+ print('doc in chunk: x [' + x + '], h[' + hashVal + '], min[' + chunkDoc.min.x +
'], max[' + chunkDoc.max.x + ']');
chunkDoc.count++;
}
@@ -49,40 +49,40 @@ for (var x = 0; x < 1000; x++) {
});
});
-var chunkDoc = configDB.chunks.find().sort({ min: 1 }).next();
+var chunkDoc = configDB.chunks.find().sort({min: 1}).next();
var min = chunkDoc.min;
var max = chunkDoc.max;
// Assumption: There are documents in the MinKey chunk, otherwise, splitVector will
// fail. Note: This chunk will have 267 documents if collection was presplit to 4.
-var cmdRes = testDB.adminCommand({ split: 'test.user', bounds: [ min, max ]});
-assert(cmdRes.ok, 'split on bounds failed on chunk[' + tojson(chunkDoc) +
- ']: ' + tojson(cmdRes));
+var cmdRes = testDB.adminCommand({split: 'test.user', bounds: [min, max]});
+assert(cmdRes.ok, 'split on bounds failed on chunk[' + tojson(chunkDoc) + ']: ' + tojson(cmdRes));
-chunkDoc = configDB.chunks.find().sort({ min: 1 }).skip(1).next();
+chunkDoc = configDB.chunks.find().sort({min: 1}).skip(1).next();
var middle = chunkDoc.min + 1000000;
-cmdRes = testDB.adminCommand({ split: 'test.user', middle: { x: middle }});
+cmdRes = testDB.adminCommand({split: 'test.user', middle: {x: middle}});
assert(cmdRes.ok, 'split failed with middle [' + middle + ']: ' + tojson(cmdRes));
-cmdRes = testDB.adminCommand({ split: 'test.user', find: { x: 7 }});
+cmdRes = testDB.adminCommand({split: 'test.user', find: {x: 7}});
assert(cmdRes.ok, 'split failed with find: ' + tojson(cmdRes));
-var chunkList = configDB.chunks.find().sort({ min: 1 }).toArray();
+var chunkList = configDB.chunks.find().sort({min: 1}).toArray();
assert.eq(chunkCountBefore + 3, chunkList.length);
chunkList.forEach(function(chunkToMove) {
- var toShard = configDB.shards.findOne({ _id: { $ne: chunkToMove.shard }})._id;
+ var toShard = configDB.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
- print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " +
- chunkToMove.shard + " to " + toShard + "...");
+ print(jsTestName() + " - moving chunk " + chunkToMove._id + " from shard " + chunkToMove.shard +
+ " to " + toShard + "...");
- var cmdRes = testDB.adminCommand({ moveChunk: 'test.user',
- bounds: [ chunkToMove.min, chunkToMove.max ],
- to: toShard, _waitForDelete: true });
- print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " +
- tojson(cmdRes));
+ var cmdRes = testDB.adminCommand({
+ moveChunk: 'test.user',
+ bounds: [chunkToMove.min, chunkToMove.max],
+ to: toShard,
+ _waitForDelete: true
+ });
+ print(jsTestName() + " - result from moving chunk " + chunkToMove._id + ": " + tojson(cmdRes));
});
st.stop();
-
diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js
index dc07f3e20f5..10ab1b1308b 100644
--- a/jstests/sharding/hash_shard1.js
+++ b/jstests/sharding/hash_shard1.js
@@ -1,13 +1,13 @@
// Basic test of sharding with a hashed shard key
// - Test basic migrations with moveChunk, using different chunk specification methods
-var s = new ShardingTest( { name : jsTestName() , shards : 3 , mongos : 1, verbose : 1 } );
+var s = new ShardingTest({name: jsTestName(), shards: 3, mongos: 1, verbose: 1});
var dbname = "test";
var coll = "foo";
var ns = dbname + "." + coll;
-var db = s.getDB( dbname );
-var t = db.getCollection( coll );
-db.adminCommand( { enablesharding : dbname } );
+var db = s.getDB(dbname);
+var t = db.getCollection(coll);
+db.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard(dbname, 'shard0001');
// for simplicity start by turning off balancer
@@ -15,50 +15,46 @@ s.stopBalancer();
// shard a fresh collection using a hashed shard key
t.drop();
-var res = db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } );
-assert.gt( s.config.chunks.count({ns:ns}), 3);
-assert.eq( res.ok , 1 , "shardcollection didn't work" );
+var res = db.adminCommand({shardcollection: ns, key: {a: "hashed"}});
+assert.gt(s.config.chunks.count({ns: ns}), 3);
+assert.eq(res.ok, 1, "shardcollection didn't work");
s.printShardingStatus();
// insert stuff
var numitems = 1000;
-for(i = 0; i < numitems; i++ ){
- t.insert( { a: i } );
+for (i = 0; i < numitems; i++) {
+ t.insert({a: i});
}
// check they all got inserted
-assert.eq( t.find().count() , numitems , "count off after inserts" );
-printjson( t.find().explain() );
+assert.eq(t.find().count(), numitems, "count off after inserts");
+printjson(t.find().explain());
// find a chunk that's not on shard0000
-var chunk = s.config.chunks.findOne( {shard : {$ne : "shard0000"} } );
-assert.neq(chunk, null, "all chunks on shard0000!");
+var chunk = s.config.chunks.findOne({shard: {$ne: "shard0000"}});
+assert.neq(chunk, null, "all chunks on shard0000!");
printjson(chunk);
// try to move the chunk using an invalid specification method. should fail.
-var res = db.adminCommand( { movechunk : ns ,
- find : { a : 0 } ,
- bounds : [ chunk.min , chunk.max ] ,
- to: "shard0000" } );
-assert.eq( res.ok , 0 , "moveChunk shouldn't work with invalid specification method");
+var res = db.adminCommand(
+ {movechunk: ns, find: {a: 0}, bounds: [chunk.min, chunk.max], to: "shard0000"});
+assert.eq(res.ok, 0, "moveChunk shouldn't work with invalid specification method");
// now move a chunk using the lower/upper bound method. should work.
-var res = db.adminCommand( { movechunk : ns ,
- bounds : [ chunk.min , chunk.max ] ,
- to: "shard0000" } );
-printjson( res );
-assert.eq( res.ok , 1 , "movechunk using lower/upper bound method didn't work " );
+var res = db.adminCommand({movechunk: ns, bounds: [chunk.min, chunk.max], to: "shard0000"});
+printjson(res);
+assert.eq(res.ok, 1, "movechunk using lower/upper bound method didn't work ");
// check count still correct.
-assert.eq( t.find().itcount() , numitems , "count off after migrate" );
-printjson( t.find().explain() );
+assert.eq(t.find().itcount(), numitems, "count off after migrate");
+printjson(t.find().explain());
// move a chunk using the find method
-var res = db.adminCommand( { movechunk : ns , find : { a : 2 } , to: "shard0002" } );
-printjson( res );
-assert.eq( res.ok , 1 , "movechunk using find query didn't work" );
+var res = db.adminCommand({movechunk: ns, find: {a: 2}, to: "shard0002"});
+printjson(res);
+assert.eq(res.ok, 1, "movechunk using find query didn't work");
// check counts still correct
-assert.eq( t.find().itcount() , numitems , "count off after migrate" );
-printjson( t.find().explain() );
+assert.eq(t.find().itcount(), numitems, "count off after migrate");
+printjson(t.find().explain());
s.stop();
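The two chunk-addressing forms this test contrasts can be summarized as follows; moveChunk accepts either a find document or explicit bounds (as stored in config.chunks), but not both in the same command. Namespace and shard names below are the test's own:

    // address the chunk by a document it would contain
    db.adminCommand({movechunk: ns, find: {a: 2}, to: 'shard0002'});
    // address the chunk by its exact [min, max) bounds from config.chunks
    var chunk = s.config.chunks.findOne({ns: ns, shard: {$ne: 'shard0000'}});
    db.adminCommand({movechunk: ns, bounds: [chunk.min, chunk.max], to: 'shard0000'});
    // supplying both find and bounds in one command is rejected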
diff --git a/jstests/sharding/hash_shard_non_empty.js b/jstests/sharding/hash_shard_non_empty.js
index 47e2aa37e1b..35c7572bb75 100644
--- a/jstests/sharding/hash_shard_non_empty.js
+++ b/jstests/sharding/hash_shard_non_empty.js
@@ -1,23 +1,22 @@
// Hash sharding on a non empty collection should not pre-split.
-var s = new ShardingTest({ name : jsTestName(), shards : 3, mongos : 1, verbose : 1 });
+var s = new ShardingTest({name: jsTestName(), shards: 3, mongos: 1, verbose: 1});
var dbname = "test";
var coll = "foo";
var db = s.getDB(dbname);
-db.adminCommand({ enablesharding : dbname });
+db.adminCommand({enablesharding: dbname});
s.ensurePrimaryShard('test', 'shard0001');
-//for simplicity turn off balancer
+// for simplicity turn off balancer
s.stopBalancer();
-db.getCollection(coll).insert({ a : 1 });
+db.getCollection(coll).insert({a: 1});
-db.getCollection(coll).ensureIndex({ a: "hashed"});
-var res = db.adminCommand({ shardcollection : dbname + "." + coll, key : { a : "hashed" } });
+db.getCollection(coll).ensureIndex({a: "hashed"});
+var res = db.adminCommand({shardcollection: dbname + "." + coll, key: {a: "hashed"}});
assert.eq(res.ok, 1, "shardcollection didn't work");
s.printShardingStatus();
var numChunks = s.config.chunks.count();
-assert.eq(numChunks, 1 , "sharding non-empty collection should not pre-split");
+assert.eq(numChunks, 1, "sharding non-empty collection should not pre-split");
s.stop();
-
diff --git a/jstests/sharding/hash_shard_num_chunks.js b/jstests/sharding/hash_shard_num_chunks.js
index ad32a080c70..b34ee7ecc92 100644
--- a/jstests/sharding/hash_shard_num_chunks.js
+++ b/jstests/sharding/hash_shard_num_chunks.js
@@ -2,39 +2,38 @@
(function() {
-var s = new ShardingTest({ shards: 3, mongos: 1 });
-s.stopBalancer();
+ var s = new ShardingTest({shards: 3, mongos: 1});
+ s.stopBalancer();
-var dbname = "test";
-var coll = "foo";
-var db = s.getDB(dbname);
+ var dbname = "test";
+ var coll = "foo";
+ var db = s.getDB(dbname);
-assert.commandWorked(db.adminCommand({ enablesharding: dbname }));
-s.ensurePrimaryShard(dbname, 'shard0001');
+ assert.commandWorked(db.adminCommand({enablesharding: dbname}));
+ s.ensurePrimaryShard(dbname, 'shard0001');
-assert.commandWorked(db.adminCommand({ shardcollection: dbname + "." + coll,
- key: { a: "hashed" },
- numInitialChunks: 500 }));
+ assert.commandWorked(db.adminCommand(
+ {shardcollection: dbname + "." + coll, key: {a: "hashed"}, numInitialChunks: 500}));
-s.printShardingStatus();
+ s.printShardingStatus();
-var numChunks = s.config.chunks.count();
-assert.eq(numChunks, 500 , "should be exactly 500 chunks");
+ var numChunks = s.config.chunks.count();
+ assert.eq(numChunks, 500, "should be exactly 500 chunks");
-var shards = s.config.shards.find();
-shards.forEach(
- // check that each shard has one third the numInitialChunks
- function (shard){
- var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
- assert.gte(numChunksOnShard, Math.floor(500/3));
- }
-);
+ var shards = s.config.shards.find();
+ shards.forEach(
+ // check that each shard has one third the numInitialChunks
+ function(shard) {
+ var numChunksOnShard = s.config.chunks.find({"shard": shard._id}).count();
+ assert.gte(numChunksOnShard, Math.floor(500 / 3));
+ });
-// Check that the collection gets dropped correctly (which doesn't happen if pre-splitting fails to
-// create the collection on all shards).
-res = db.runCommand({ "drop": coll });
-assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
+    // Check that the collection gets dropped correctly (which doesn't happen if pre-splitting
+    // fails to create the collection on all shards).
+ res = db.runCommand({"drop": coll});
+ assert.eq(res.ok, 1, "couldn't drop empty, pre-split collection");
-s.stop();
+ s.stop();
})();
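For context, hashed pre-splitting is driven entirely by the shardCollection call, and numInitialChunks is honoured only when the shard key is hashed and the collection is empty (the preceding hash_shard_non_empty.js diff shows the non-empty case collapsing to a single chunk). A minimal sketch with placeholder database and collection names:

    assert.commandWorked(db.adminCommand({enablesharding: 'mydb'}));
    assert.commandWorked(db.adminCommand(
        {shardcollection: 'mydb.events', key: {a: 'hashed'}, numInitialChunks: 500}));
    // the initial chunks are spread roughly evenly across the shards
    var chunks = db.getSiblingDB('config').chunks.count({ns: 'mydb.events'});
    assert.eq(500, chunks);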
diff --git a/jstests/sharding/hash_shard_unique_compound.js b/jstests/sharding/hash_shard_unique_compound.js
index 25acfb87f48..3d82c2452b5 100644
--- a/jstests/sharding/hash_shard_unique_compound.js
+++ b/jstests/sharding/hash_shard_unique_compound.js
@@ -3,26 +3,26 @@
// 1.) shard collection on hashed "a", ensure unique index {a:1, b:1}
// 2.) reverse order
-var s = new ShardingTest( { name : jsTestName() , shards : 1 , mongos : 1, verbose : 1 } );
+var s = new ShardingTest({name: jsTestName(), shards: 1, mongos: 1, verbose: 1});
var dbName = "test";
var collName = "foo";
var ns = dbName + "." + collName;
-var db = s.getDB( dbName );
-var coll = db.getCollection( collName );
+var db = s.getDB(dbName);
+var coll = db.getCollection(collName);
// Enable sharding on DB
-var res = db.adminCommand( { enablesharding : dbName } );
+var res = db.adminCommand({enablesharding: dbName});
// for simplicity start by turning off balancer
var res = s.stopBalancer();
// shard a fresh collection using a hashed shard key
coll.drop();
-assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } ));
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}));
s.printShardingStatus();
// Create unique index
-assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
jsTest.log("------ indexes -------");
jsTest.log(tojson(coll.getIndexes()));
@@ -31,11 +31,11 @@ jsTest.log(tojson(coll.getIndexes()));
jsTest.log("------ dropping sharded collection to start part 2 -------");
coll.drop();
-//Create unique index
-assert.commandWorked(coll.ensureIndex({ a: 1, b: 1 }, { unique: true }));
+// Create unique index
+assert.commandWorked(coll.ensureIndex({a: 1, b: 1}, {unique: true}));
// shard a fresh collection using a hashed shard key
-assert.commandWorked(db.adminCommand( { shardcollection : ns , key : { a : "hashed" } } ),
+assert.commandWorked(db.adminCommand({shardcollection: ns, key: {a: "hashed"}}),
"shardcollection didn't worked 2");
s.printShardingStatus();
diff --git a/jstests/sharding/hash_single_shard.js b/jstests/sharding/hash_single_shard.js
index 7dc9d6df078..8018a1ab640 100644
--- a/jstests/sharding/hash_single_shard.js
+++ b/jstests/sharding/hash_single_shard.js
@@ -1,14 +1,14 @@
// Test hashed presplit with 1 shard.
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
var testDB = st.getDB('test');
-//create hashed shard key and enable sharding
-testDB.adminCommand({ enablesharding: "test" });
-testDB.adminCommand({ shardCollection: "test.collection", key: { a: "hashed" }});
+// create hashed shard key and enable sharding
+testDB.adminCommand({enablesharding: "test"});
+testDB.adminCommand({shardCollection: "test.collection", key: {a: "hashed"}});
-//check the number of initial chunks.
-assert.eq(2, st.getDB('config').chunks.count(),
- 'Using hashed shard key but failing to do correct presplitting');
+// check the number of initial chunks.
+assert.eq(2,
+ st.getDB('config').chunks.count(),
+ 'Using hashed shard key but failing to do correct presplitting');
st.stop();
-
diff --git a/jstests/sharding/hash_skey_split.js b/jstests/sharding/hash_skey_split.js
index a1f0060feae..fe8cef3e0d3 100644
--- a/jstests/sharding/hash_skey_split.js
+++ b/jstests/sharding/hash_skey_split.js
@@ -1,26 +1,25 @@
(function() {
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' }));
+ var configDB = st.s.getDB('config');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-st.ensurePrimaryShard('test', 'shard0001');
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.user',
- key: { x: 'hashed' },
- numInitialChunks: 2 }));
+ st.ensurePrimaryShard('test', 'shard0001');
+ assert.commandWorked(configDB.adminCommand(
+ {shardCollection: 'test.user', key: {x: 'hashed'}, numInitialChunks: 2}));
-var metadata = st.d0.getDB('admin').runCommand({ getShardVersion: 'test.user',
- fullMetadata: true });
-var chunks = metadata.metadata.chunks.length > 0 ?
- metadata.metadata.chunks : metadata.metadata.pending;
-assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
+ var metadata =
+ st.d0.getDB('admin').runCommand({getShardVersion: 'test.user', fullMetadata: true});
+ var chunks =
+ metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
-metadata = st.d1.getDB('admin').runCommand({ getShardVersion: 'test.user',
- fullMetadata: true });
-chunks = metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
-assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
+ metadata = st.d1.getDB('admin').runCommand({getShardVersion: 'test.user', fullMetadata: true});
+ chunks =
+ metadata.metadata.chunks.length > 0 ? metadata.metadata.chunks : metadata.metadata.pending;
+ assert(bsonWoCompare(chunks[0][0], chunks[0][1]) < 0, tojson(metadata));
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/idhack_sharded.js b/jstests/sharding/idhack_sharded.js
index 8c45f5d0f00..a5a8ae5df5c 100644
--- a/jstests/sharding/idhack_sharded.js
+++ b/jstests/sharding/idhack_sharded.js
@@ -10,8 +10,8 @@ assert.commandWorked(coll.getDB().adminCommand({enableSharding: coll.getDB().get
coll.getDB().adminCommand({movePrimary: coll.getDB().getName(), to: "shard0000"});
assert.commandWorked(coll.getDB().adminCommand({shardCollection: coll.getFullName(), key: {x: 1}}));
assert.commandWorked(coll.getDB().adminCommand({split: coll.getFullName(), middle: {x: 0}}));
-assert.commandWorked(coll.getDB().adminCommand({moveChunk: coll.getFullName(), find: {x: 0},
- to: "shard0001"}));
+assert.commandWorked(
+ coll.getDB().adminCommand({moveChunk: coll.getFullName(), find: {x: 0}, to: "shard0001"}));
//
// Test that idhack queries with projections that remove the shard key return correct results.
diff --git a/jstests/sharding/implicit_db_creation.js b/jstests/sharding/implicit_db_creation.js
index 19c48a5f03b..ed202ff2ea5 100644
--- a/jstests/sharding/implicit_db_creation.js
+++ b/jstests/sharding/implicit_db_creation.js
@@ -2,45 +2,45 @@
* This tests the basic cases for implicit database creation in a sharded cluster.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 2 });
-var configDB = st.s.getDB('config');
+ var st = new ShardingTest({shards: 2});
+ var configDB = st.s.getDB('config');
-assert.eq(null, configDB.databases.findOne());
+ assert.eq(null, configDB.databases.findOne());
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-// Test that reads will not result into a new config.databases entry.
-assert.eq(null, testDB.user.findOne());
-assert.eq(null, configDB.databases.findOne({ _id: 'test' }));
+ // Test that reads will not result into a new config.databases entry.
+ assert.eq(null, testDB.user.findOne());
+ assert.eq(null, configDB.databases.findOne({_id: 'test'}));
-assert.writeOK(testDB.user.insert({ x: 1 }));
+ assert.writeOK(testDB.user.insert({x: 1}));
-var testDBDoc = configDB.databases.findOne();
-assert.eq('test', testDBDoc._id, tojson(testDBDoc));
+ var testDBDoc = configDB.databases.findOne();
+ assert.eq('test', testDBDoc._id, tojson(testDBDoc));
-// Test that inserting to another collection in the same database will not modify the existing
-// config.databases entry.
-assert.writeOK(testDB.bar.insert({ y: 1 }));
-assert.eq(testDBDoc, configDB.databases.findOne());
+ // Test that inserting to another collection in the same database will not modify the existing
+ // config.databases entry.
+ assert.writeOK(testDB.bar.insert({y: 1}));
+ assert.eq(testDBDoc, configDB.databases.findOne());
-st.s.adminCommand({ enableSharding: 'foo' });
-var fooDBDoc = configDB.databases.findOne({ _id: 'foo' });
+ st.s.adminCommand({enableSharding: 'foo'});
+ var fooDBDoc = configDB.databases.findOne({_id: 'foo'});
-assert.neq(null, fooDBDoc);
-assert(fooDBDoc.partitioned);
+ assert.neq(null, fooDBDoc);
+ assert(fooDBDoc.partitioned);
-var newShardConn = MongoRunner.runMongod({});
-var unshardedDB = newShardConn.getDB('unshardedDB');
+ var newShardConn = MongoRunner.runMongod({});
+ var unshardedDB = newShardConn.getDB('unshardedDB');
-unshardedDB.user.insert({ z: 1 });
+ unshardedDB.user.insert({z: 1});
-assert.commandWorked(st.s.adminCommand({ addShard: newShardConn.name }));
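+    // Adding a shard that already holds data should register its databases in config.databases.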
+ assert.commandWorked(st.s.adminCommand({addShard: newShardConn.name}));
-assert.neq(null, configDB.databases.findOne({ _id: 'unshardedDB' }));
+ assert.neq(null, configDB.databases.findOne({_id: 'unshardedDB'}));
-MongoRunner.stopMongod(newShardConn.port);
-st.stop();
+ MongoRunner.stopMongod(newShardConn.port);
+ st.stop();
})();
diff --git a/jstests/sharding/in_memory_sort_limit.js b/jstests/sharding/in_memory_sort_limit.js
index 83aa8839986..1c7e8c73447 100644
--- a/jstests/sharding/in_memory_sort_limit.js
+++ b/jstests/sharding/in_memory_sort_limit.js
@@ -2,44 +2,48 @@
// doesn't cause the in-memory sort limit to be reached, then make sure the same limit also doesn't
// cause the in-memory sort limit to be reached when running through a mongos.
(function() {
- "use strict";
-
- var st = new ShardingTest({ shards: 2 });
- var db = st.s.getDB('test');
- var mongosCol = db.getCollection('skip');
- db.adminCommand({ enableSharding: 'test' });
- st.ensurePrimaryShard('test', 'shard0001');
- db.adminCommand({ shardCollection: 'test.skip', key: { _id: 1 }});
-
- var filler = new Array(10000).toString();
- var bulk = [];
- // create enough data to exceed 32MB in-memory sort limit.
- for (var i = 0; i < 20000; i++) {
- bulk.push({x:i, str:filler});
- }
- assert.writeOK(mongosCol.insert(bulk));
-
- // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
- // single-shard query (which doesn't exercise the bug).
- st.startBalancer();
- st.awaitBalance('skip', 'test');
-
- var docCount = mongosCol.count();
- var shardCol = st.shard0.getDB('test').getCollection('skip');
- var passLimit = 2000;
- var failLimit = 4000;
- jsTestLog("Test no error with limit of " + passLimit + " on mongod");
- assert.eq(passLimit, shardCol.find().sort({x:1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongod");
- assert.throws( function() {shardCol.find().sort({x:1}).limit(failLimit).itcount(); } );
-
- jsTestLog("Test no error with limit of " + passLimit + " on mongos");
- assert.eq(passLimit, mongosCol.find().sort({x:1}).limit(passLimit).itcount());
-
- jsTestLog("Test error with limit of " + failLimit + " on mongos");
- assert.throws( function() {mongosCol.find().sort({x:1}).limit(failLimit).itcount(); } );
-
- st.stop();
-
- })();
+ "use strict";
+
+ var st = new ShardingTest({shards: 2});
+ var db = st.s.getDB('test');
+ var mongosCol = db.getCollection('skip');
+ db.adminCommand({enableSharding: 'test'});
+ st.ensurePrimaryShard('test', 'shard0001');
+ db.adminCommand({shardCollection: 'test.skip', key: {_id: 1}});
+
+ var filler = new Array(10000).toString();
+ var bulk = [];
+ // create enough data to exceed 32MB in-memory sort limit.
+ for (var i = 0; i < 20000; i++) {
+ bulk.push({x: i, str: filler});
+ }
+ assert.writeOK(mongosCol.insert(bulk));
+
+ // Make sure that at least 1 chunk is on another shard so that mongos doesn't treat this as a
+ // single-shard query (which doesn't exercise the bug).
+ st.startBalancer();
+ st.awaitBalance('skip', 'test');
+
+ var docCount = mongosCol.count();
+ var shardCol = st.shard0.getDB('test').getCollection('skip');
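+    // Each doc carries roughly 10KB of filler, so sorting 2000 docs (~20MB) stays under the
+    // 32MB in-memory sort limit while 4000 docs (~40MB) exceed it.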
+ var passLimit = 2000;
+ var failLimit = 4000;
+ jsTestLog("Test no error with limit of " + passLimit + " on mongod");
+ assert.eq(passLimit, shardCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+ jsTestLog("Test error with limit of " + failLimit + " on mongod");
+ assert.throws(function() {
+ shardCol.find().sort({x: 1}).limit(failLimit).itcount();
+ });
+
+ jsTestLog("Test no error with limit of " + passLimit + " on mongos");
+ assert.eq(passLimit, mongosCol.find().sort({x: 1}).limit(passLimit).itcount());
+
+ jsTestLog("Test error with limit of " + failLimit + " on mongos");
+ assert.throws(function() {
+ mongosCol.find().sort({x: 1}).limit(failLimit).itcount();
+ });
+
+ st.stop();
+
+})();
diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js
index 7f0cea49cf0..766bd96e260 100644
--- a/jstests/sharding/index1.js
+++ b/jstests/sharding/index1.js
@@ -1,391 +1,359 @@
// SERVER-2326 - make sure that sharding only works with unique indices
(function() {
-var s = new ShardingTest({ name: "shard_index", shards: 2, mongos: 1 });
-
-// Regenerate fully because of SERVER-2782
-for ( var i = 0; i < 22; i++ ) {
-
- var coll = s.admin._mongo.getDB( "test" ).getCollection( "foo" + i );
- coll.drop();
-
- var bulk = coll.initializeUnorderedBulkOp();
- for ( var j = 0; j < 300; j++ ) {
- bulk.insert({ num: j, x: 1 });
- }
- assert.writeOK(bulk.execute());
-
- if (i == 0) {
- s.adminCommand({ enablesharding: "" + coll._db });
- s.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
- }
-
- print("\n\n\n\n\nTest # " + i);
-
- if ( i == 0 ) {
-
- // Unique index exists, but not the right one.
- coll.ensureIndex( { num : 1 }, { unique : true } );
- coll.ensureIndex( { x : 1 } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- } catch (e) {
- print( e );
- }
- assert( !passed, "Should not shard collection when another unique index exists!");
-
- }
- if ( i == 1 ) {
-
- // Unique index exists as prefix, also index exists
- coll.ensureIndex( { x : 1 } );
- coll.ensureIndex( { x : 1, num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } });
- }
- catch(e){
- print(e);
- assert( false, "Should be able to shard non-unique index without unique option.");
- }
-
- }
- if ( i == 2 ) {
- // Non-unique index exists as prefix, also index exists. No unique index.
- coll.ensureIndex( { x : 1 } );
- coll.ensureIndex( { x : 1, num : 1 } );
-
- passed = false;
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { x : 1 } });
- passed = true;
-
- }
- catch( e ){
- print(e);
- assert( !passed, "Should be able to shard collection with no unique index if unique not specified.");
- }
- }
- if ( i == 3 ) {
-
- // Unique index exists as prefix, also unique index exists
- coll.ensureIndex( { num : 1 }, { unique : true });
- coll.ensureIndex( { num : 1 , x : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with unique prefix index.");
- }
-
- }
- if ( i == 4 ) {
-
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with unique id index.");
- }
-
- }
- if ( i == 5 ) {
-
- // Unique index exists as id, also unique prefix index exists
- coll.ensureIndex( { _id : 1, num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { _id : 1, num : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with unique combination id index.");
- }
-
- }
- if ( i == 6 ) {
-
- coll.remove({});
-
- // Unique index does not exist, also unique prefix index exists
- coll.ensureIndex( { num : 1, _id : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with no unique index but with a unique prefix index.");
- }
-
- printjson( coll.getIndexes() );
-
- // Make sure the index created is unique!
- assert.eq( 1, coll.getIndexes().filter( function(z) { return friendlyEqual( z.key, { num : 1 } ) && z.unique; } ).length );
-
- }
- if ( i == 7 ) {
- coll.remove({});
-
- // No index exists
-
- try{
- assert.eq( coll.find().itcount(), 0 );
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 } });
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with no index on shard key.");
- }
- }
- if ( i == 8 ) {
- coll.remove({});
-
- // No index exists
-
- passed = false;
- try{
- assert.eq( coll.find().itcount(), 0 );
- s.adminCommand({ shardcollection : "" + coll, key : { num : 1 }, unique : true });
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed, "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
-
- printjson( coll.getIndexes() );
-
- // Make sure the index created is unique!
- assert.eq( 1, coll.getIndexes().filter( function(z) { return friendlyEqual( z.key, { num : 1 } ) && z.unique; } ).length );
- }
- if ( i == 9 ) {
-
- // Unique index exists on a different field as well
- coll.ensureIndex( { num : 1 }, { unique : true } );
- coll.ensureIndex( { x : 1 } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- } catch (e) {
- print( e );
- }
- assert( !passed, "Should not shard collection when another unique index exists!" );
- }
- if ( i == 10 ){
-
- //try sharding non-empty collection without any index
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard without index");
-
- //now add containing index and try sharding by prefix
- coll.ensureIndex( {num : 1, x : 1} );
-
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed , "Should be able to shard collection with prefix of existing index");
-
- printjson( coll.getIndexes() );
-
- //make sure no extra index is created
- assert.eq( 2, coll.getIndexes().length );
- }
- if ( i == 11 ){
- coll.remove({});
-
- //empty collection with useful index. check new index not created
- coll.ensureIndex( {num : 1, x : 1} );
-
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( passed , "Should be able to shard collection with prefix of existing index");
-
- printjson( coll.getIndexes() );
-
- //make sure no extra index is created
- assert.eq( 2, coll.getIndexes().length );
- }
- if ( i == 12 ){
-
- //check multikey values for x make index unusable for shard key
- coll.save({num : 100 , x : [2,3] });
- coll.ensureIndex( {num : 1, x : 1} );
-
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard collection with mulikey index");
- }
- if ( i == 13 ){
-
- coll.save({ num : [100, 200], x : 10});
- coll.ensureIndex( { num : 1, x : 1} );
-
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard collection with mulikey index");
-
- }
- if ( i == 14 ){
-
- coll.save({ num : 100, x : 10, y : [1,2]});
- coll.ensureIndex( { num : 1, x : 1, y : 1} );
-
- passed = false;
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : 1 } } );
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to shard collection with mulikey index");
-
- }
- if ( i == 15 ) {
-
- // try sharding with a hashed index
- coll.ensureIndex( { num : "hashed"} );
-
- try{
- s.adminCommand( { shardcollection : "" + coll, key : { num : "hashed" } } );
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard collection with hashed index.");
- }
- }
- if ( i == 16 ) {
-
- // create hashed index, but try to declare it unique when sharding
- coll.ensureIndex( { num : "hashed"} );
-
- passed = false;
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : "hashed" }, unique : true});
- passed = true;
- }
- catch( e ){
- print(e);
- }
- assert( !passed , "Should not be able to declare hashed shard key unique.");
-
- }
- if ( i == 17 ) {
-
- // create hashed index, but unrelated unique index present
- coll.ensureIndex( { x : "hashed" } );
- coll.ensureIndex( { num : 1 }, { unique : true} );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : "hashed" } } );
- passed = true;
- }
- catch (e) {
- print( e );
- }
- assert( !passed, "Should not be able to shard on hashed index with another unique index" );
-
- }
- if ( i == 18 ) {
-
- // create hashed index, and a regular unique index exists on same field
- coll.ensureIndex( { num : "hashed" } );
- coll.ensureIndex( { num : 1 }, { unique : true } );
-
- try{
- s.adminCommand({ shardcollection : "" + coll, key : { num : "hashed" } } );
- }
- catch( e ){
- print(e);
- assert( false, "Should be able to shard coll with hashed and regular unique index");
- }
- }
- if ( i == 19 ) {
- // Create sparse index.
- coll.ensureIndex( { x : 1 }, { sparse : true } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- }
- catch ( e ) {
- print( e );
- }
- assert( !passed, "Should not be able to shard coll with sparse index" );
- }
- if ( i == 20 ) {
- // Create partial index.
- coll.ensureIndex( { x : 1 }, { filter: { num : { $gt : 1 } } } );
-
- passed = false;
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- passed = true;
- }
- catch ( e ) {
- print( e );
- }
- assert( !passed, "Should not be able to shard coll with partial index" );
- }
- if ( i == 21 ) {
- // Ensure that a collection with a normal index and a partial index can be sharded, where
- // both are prefixed by the shard key.
-
- coll.ensureIndex( { x : 1, num : 1 }, { filter: { num : { $gt : 1 } } } );
- coll.ensureIndex( { x : 1, num : -1 } );
-
- try {
- s.adminCommand( { shardcollection : "" + coll, key : { x : 1 } } );
- }
- catch ( e ) {
- print( e );
- assert( false, "Should be able to shard coll with regular and partial index");
+ var s = new ShardingTest({name: "shard_index", shards: 2, mongos: 1});
+
+ // Regenerate fully because of SERVER-2782
+ for (var i = 0; i < 22; i++) {
+ var coll = s.admin._mongo.getDB("test").getCollection("foo" + i);
+ coll.drop();
+
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var j = 0; j < 300; j++) {
+ bulk.insert({num: j, x: 1});
+ }
+ assert.writeOK(bulk.execute());
+
+ if (i == 0) {
+ s.adminCommand({enablesharding: "" + coll._db});
+ s.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ }
+
+ print("\n\n\n\n\nTest # " + i);
+
+ if (i == 0) {
+ // Unique index exists, but not the right one.
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 1) {
+ // Unique index exists as prefix, also index exists
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard non-unique index without unique option.");
+ }
+ }
+ if (i == 2) {
+ // Non-unique index exists as prefix, also index exists. No unique index.
+ coll.ensureIndex({x: 1});
+ coll.ensureIndex({x: 1, num: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+
+ } catch (e) {
+ print(e);
+ assert(
+ !passed,
+ "Should be able to shard collection with no unique index if unique not specified.");
+ }
+ }
+ if (i == 3) {
+ // Unique index exists as prefix, also unique index exists
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({num: 1, x: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique prefix index.");
+ }
+ }
+ if (i == 4) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with unique id index.");
+ }
+ }
+ if (i == 5) {
+ // Unique index exists as id, also unique prefix index exists
+ coll.ensureIndex({_id: 1, num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {_id: 1, num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(false,
+ "Should be able to shard collection with unique combination id index.");
+ }
+ }
+ if (i == 6) {
+ coll.remove({});
+
+ // Unique index does not exist, also unique prefix index exists
+ coll.ensureIndex({num: 1, _id: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ } catch (e) {
+ print(e);
+ assert(
+ false,
+ "Should be able to shard collection with no unique index but with a unique prefix index.");
+ }
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes().filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ }).length);
+ }
+ if (i == 7) {
+ coll.remove({});
+
+ // No index exists
+
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with no index on shard key.");
+ }
+ }
+ if (i == 8) {
+ coll.remove({});
+
+ // No index exists
+
+ passed = false;
+ try {
+ assert.eq(coll.find().itcount(), 0);
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(
+ passed,
+ "Should be able to shard collection with unique flag but with no unique index on shard key, if coll empty.");
+
+ printjson(coll.getIndexes());
+
+ // Make sure the index created is unique!
+ assert.eq(1,
+ coll.getIndexes().filter(function(z) {
+ return friendlyEqual(z.key, {num: 1}) && z.unique;
+ }).length);
+ }
+ if (i == 9) {
+ // Unique index exists on a different field as well
+ coll.ensureIndex({num: 1}, {unique: true});
+ coll.ensureIndex({x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not shard collection when another unique index exists!");
+ }
+ if (i == 10) {
+ // try sharding non-empty collection without any index
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard without index");
+
+ // now add containing index and try sharding by prefix
+ coll.ensureIndex({num: 1, x: 1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(passed, "Should be able to shard collection with prefix of existing index");
+
+ printjson(coll.getIndexes());
+
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 11) {
+ coll.remove({});
+
+ // empty collection with useful index. check new index not created
+ coll.ensureIndex({num: 1, x: 1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(passed, "Should be able to shard collection with prefix of existing index");
+
+ printjson(coll.getIndexes());
+
+ // make sure no extra index is created
+ assert.eq(2, coll.getIndexes().length);
+ }
+ if (i == 12) {
+ // check multikey values for x make index unusable for shard key
+ coll.save({num: 100, x: [2, 3]});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+            assert(!passed, "Should not be able to shard collection with multikey index");
+ }
+ if (i == 13) {
+ coll.save({num: [100, 200], x: 10});
+ coll.ensureIndex({num: 1, x: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+            assert(!passed, "Should not be able to shard collection with multikey index");
+ }
+ if (i == 14) {
+ coll.save({num: 100, x: 10, y: [1, 2]});
+ coll.ensureIndex({num: 1, x: 1, y: 1});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+            assert(!passed, "Should not be able to shard collection with multikey index");
+ }
+ if (i == 15) {
+ // try sharding with a hashed index
+ coll.ensureIndex({num: "hashed"});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard collection with hashed index.");
+ }
+ }
+ if (i == 16) {
+ // create hashed index, but try to declare it unique when sharding
+ coll.ensureIndex({num: "hashed"});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to declare hashed shard key unique.");
+ }
+ if (i == 17) {
+ // create hashed index, but unrelated unique index present
+ coll.ensureIndex({x: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed,
+ "Should not be able to shard on hashed index with another unique index");
+ }
+ if (i == 18) {
+ // create hashed index, and a regular unique index exists on same field
+ coll.ensureIndex({num: "hashed"});
+ coll.ensureIndex({num: 1}, {unique: true});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with hashed and regular unique index");
+ }
+ }
+ if (i == 19) {
+ // Create sparse index.
+ coll.ensureIndex({x: 1}, {sparse: true});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard coll with sparse index");
+ }
+ if (i == 20) {
+ // Create partial index.
+ coll.ensureIndex({x: 1}, {filter: {num: {$gt: 1}}});
+
+ passed = false;
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ passed = true;
+ } catch (e) {
+ print(e);
+ }
+ assert(!passed, "Should not be able to shard coll with partial index");
+ }
+ if (i == 21) {
+            // Ensure that a collection with a normal index and a partial index can be sharded,
+            // where both are prefixed by the shard key.
+
+ coll.ensureIndex({x: 1, num: 1}, {filter: {num: {$gt: 1}}});
+ coll.ensureIndex({x: 1, num: -1});
+
+ try {
+ s.adminCommand({shardcollection: "" + coll, key: {x: 1}});
+ } catch (e) {
+ print(e);
+ assert(false, "Should be able to shard coll with regular and partial index");
+ }
}
}
-}
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/inserts_consistent.js b/jstests/sharding/inserts_consistent.js
index 7da92837ad7..b4ee71bbe7a 100644
--- a/jstests/sharding/inserts_consistent.js
+++ b/jstests/sharding/inserts_consistent.js
@@ -1,8 +1,8 @@
// Test write re-routing on version mismatch.
-var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 2 });
+var st = new ShardingTest({shards: 2, mongos: 2, verbose: 2});
-jsTest.log( "Doing test setup..." );
+jsTest.log("Doing test setup...");
// Stop balancer, since it'll just get in the way of this
st.stopBalancer();
@@ -10,75 +10,75 @@ st.stopBalancer();
var mongos = st.s;
var admin = mongos.getDB("admin");
var config = mongos.getDB("config");
-var coll = st.s.getCollection( jsTest.name() + ".coll" );
+var coll = st.s.getCollection(jsTest.name() + ".coll");
-st.shardColl( coll, { _id : 1 }, { _id : 0 }, false );
+st.shardColl(coll, {_id: 1}, {_id: 0}, false);
-jsTest.log( "Refreshing second mongos..." );
+jsTest.log("Refreshing second mongos...");
var mongosB = st.s1;
var adminB = mongosB.getDB("admin");
-var collB = mongosB.getCollection( coll + "" );
+var collB = mongosB.getCollection(coll + "");
// Make sure mongosB knows about the coll
-assert.eq( 0, collB.find().itcount() );
+assert.eq(0, collB.find().itcount());
// printjson( adminB.runCommand({ flushRouterConfig : 1 }) )
-jsTest.log( "Moving chunk to create stale mongos..." );
+jsTest.log("Moving chunk to create stale mongos...");
-var otherShard = config.chunks.findOne({ _id : sh._collRE( coll ) }).shard;
-for( var i = 0; i < st._shardNames.length; i++ ){
- if( otherShard != st._shardNames[i] ){
+var otherShard = config.chunks.findOne({_id: sh._collRE(coll)}).shard;
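+// Find a shard other than the chunk's current owner; moving the chunk there via the first
+// mongos leaves mongosB with stale routing info.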
+for (var i = 0; i < st._shardNames.length; i++) {
+ if (otherShard != st._shardNames[i]) {
otherShard = st._shardNames[i];
break;
}
}
-print( "Other shard : " + otherShard );
+print("Other shard : " + otherShard);
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : 0 }, to : otherShard }) );
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: otherShard}));
-jsTest.log( "Inserting docs that needs to be retried..." );
+jsTest.log("Inserting docs that needs to be retried...");
var nextId = -1;
-for( var i = 0; i < 2; i++ ){
- printjson( "Inserting " + nextId );
- assert.writeOK(collB.insert({ _id : nextId--, hello : "world" }));
+for (var i = 0; i < 2; i++) {
+ printjson("Inserting " + nextId);
+ assert.writeOK(collB.insert({_id: nextId--, hello: "world"}));
}
-jsTest.log( "Inserting doc which successfully goes through..." );
+jsTest.log("Inserting doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
// Assert that write went through
-assert.eq( coll.find().itcount(), 3 );
+assert.eq(coll.find().itcount(), 3);
-jsTest.log( "Now try moving the actual chunk we're writing to..." );
+jsTest.log("Now try moving the actual chunk we're writing to...");
// Now move the actual chunk we're writing to
-printjson( admin.runCommand({ moveChunk : coll + "", find : { _id : -1 }, to : otherShard }) );
+printjson(admin.runCommand({moveChunk: coll + "", find: {_id: -1}, to: otherShard}));
-jsTest.log( "Inserting second docs to get written back..." );
+jsTest.log("Inserting second docs to get written back...");
// Will fail entirely if too many of these, waiting for write to get applied can get too long.
-for( var i = 0; i < 2; i++ ){
- collB.insert({ _id : nextId--, hello : "world" });
+for (var i = 0; i < 2; i++) {
+ collB.insert({_id: nextId--, hello: "world"});
}
// Refresh server
-printjson( adminB.runCommand({ flushRouterConfig : 1 }) );
+printjson(adminB.runCommand({flushRouterConfig: 1}));
-jsTest.log( "Inserting second doc which successfully goes through..." );
+jsTest.log("Inserting second doc which successfully goes through...");
// Do second write
-assert.writeOK(collB.insert({ _id : nextId--, goodbye : "world" }));
+assert.writeOK(collB.insert({_id: nextId--, goodbye: "world"}));
-jsTest.log( "All docs written this time!" );
+jsTest.log("All docs written this time!");
// Assert that writes went through.
-assert.eq( coll.find().itcount(), 6 );
+assert.eq(coll.find().itcount(), 6);
-jsTest.log( "DONE" );
+jsTest.log("DONE");
-st.stop();
+st.stop();
diff --git a/jstests/sharding/ismaster.js b/jstests/sharding/ismaster.js
index 3f6005c4807..b3500cf6009 100644
--- a/jstests/sharding/ismaster.js
+++ b/jstests/sharding/ismaster.js
@@ -1,27 +1,38 @@
-var st = new ShardingTest({shards:1, mongos:1});
+var st = new ShardingTest({shards: 1, mongos: 1});
var res = st.s0.getDB("admin").runCommand("ismaster");
// check that the fields that should be there are there and have proper values
-assert( res.maxBsonObjectSize &&
- isNumber(res.maxBsonObjectSize) &&
- res.maxBsonObjectSize > 0, "maxBsonObjectSize possibly missing:" + tojson(res));
-assert( res.maxMessageSizeBytes &&
- isNumber(res.maxMessageSizeBytes) &&
- res.maxBsonObjectSize > 0, "maxMessageSizeBytes possibly missing:" + tojson(res));
+assert(res.maxBsonObjectSize && isNumber(res.maxBsonObjectSize) && res.maxBsonObjectSize > 0,
+ "maxBsonObjectSize possibly missing:" + tojson(res));
+assert(res.maxMessageSizeBytes && isNumber(res.maxMessageSizeBytes) && res.maxMessageSizeBytes > 0,
+ "maxMessageSizeBytes possibly missing:" + tojson(res));
assert(res.ismaster, "ismaster missing or false:" + tojson(res));
assert(res.localTime, "localTime possibly missing:" + tojson(res));
assert(res.msg && res.msg == "isdbgrid", "msg possibly missing or wrong:" + tojson(res));
-var unwantedFields = ["setName", "setVersion", "secondary", "hosts", "passives", "arbiters",
- "primary", "aribterOnly", "passive", "slaveDelay", "hidden", "tags",
- "buildIndexes", "me"];
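+// Replica-set-specific isMaster fields; a mongos response should not report any of these.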
+var unwantedFields = [
+ "setName",
+ "setVersion",
+ "secondary",
+ "hosts",
+ "passives",
+ "arbiters",
+ "primary",
+    "arbiterOnly",
+ "passive",
+ "slaveDelay",
+ "hidden",
+ "tags",
+ "buildIndexes",
+ "me"
+];
// check that the fields that shouldn't be there are not there
var badFields = [];
for (field in res) {
- if (!res.hasOwnProperty(field)){
+ if (!res.hasOwnProperty(field)) {
continue;
}
if (Array.contains(unwantedFields, field)) {
badFields.push(field);
}
}
-assert(badFields.length === 0, "\nthe result:\n" + tojson(res)
- + "\ncontained fields it shouldn't have: " + badFields);
+assert(badFields.length === 0,
+ "\nthe result:\n" + tojson(res) + "\ncontained fields it shouldn't have: " + badFields);
diff --git a/jstests/sharding/jumbo1.js b/jstests/sharding/jumbo1.js
index 22b4004635e..1e8a3a9fdb2 100644
--- a/jstests/sharding/jumbo1.js
+++ b/jstests/sharding/jumbo1.js
@@ -1,57 +1,53 @@
(function() {
-var s = new ShardingTest({ name: "jumbo1",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1 } });
+ var s = new ShardingTest({name: "jumbo1", shards: 2, mongos: 1, other: {chunkSize: 1}});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {x: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-big = "";
-while ( big.length < 10000 )
- big += ".";
+ big = "";
+ while (big.length < 10000)
+ big += ".";
-x = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( ; x < 500; x++ )
- bulk.insert( { x : x , big : big } );
+ x = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (; x < 500; x++)
+ bulk.insert({x: x, big: big});
-for ( i=0; i<500; i++ )
- bulk.insert( { x : x , big : big } );
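+    // 500 more docs that all share the same shard key value (x stays at 500), creating a chunk
+    // that cannot be split.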
+ for (i = 0; i < 500; i++)
+ bulk.insert({x: x, big: big});
-for ( ; x < 2000; x++ )
- bulk.insert( { x : x , big : big } );
+ for (; x < 2000; x++)
+ bulk.insert({x: x, big: big});
-assert.writeOK( bulk.execute() );
+ assert.writeOK(bulk.execute());
-s.printShardingStatus(true);
-
-res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0001" );
-if ( ! res.ok )
- res = sh.moveChunk( "test.foo" , { x : 0 } , "shard0000" );
+ s.printShardingStatus(true);
-s.printShardingStatus(true);
+ res = sh.moveChunk("test.foo", {x: 0}, "shard0001");
+ if (!res.ok)
+ res = sh.moveChunk("test.foo", {x: 0}, "shard0000");
-sh.setBalancerState( true );
+ s.printShardingStatus(true);
-function diff1(){
- var x = s.chunkCounts( "foo" );
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
+ sh.setBalancerState(true);
-assert.soon( function(){
- var d = diff1();
- print( "diff: " + d );
- s.printShardingStatus(true);
- return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
+ function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
+ assert.soon(function() {
+ var d = diff1();
+ print("diff: " + d);
+ s.printShardingStatus(true);
+ return d < 5;
+ }, "balance didn't happen", 1000 * 60 * 5, 5000);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/key_many.js b/jstests/sharding/key_many.js
index 85cde37ba1d..93ce1b4d64a 100644
--- a/jstests/sharding/key_many.js
+++ b/jstests/sharding/key_many.js
@@ -1,169 +1,254 @@
(function() {
-'use strict';
-
-// Values have to be sorted - you must have exactly 6 values in each array
-var types = [
- { name: "string", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "k" },
- { name: "double", values: [ 1.2, 3.5, 4.5, 4.6, 6.7, 9.9 ], keyfield: "a" },
- { name: "date", values: [ new Date(1000000), new Date(2000000), new Date(3000000), new Date(4000000), new Date(5000000), new Date(6000000) ], keyfield: "a" },
- { name: "string_id", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "_id" },
- { name: "embedded 1", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "a.b" },
- { name: "embedded 2", values: [ "allan", "bob", "eliot", "joe", "mark", "sara" ], keyfield: "a.b.c" },
- { name: "object", values: [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ], keyfield: "o" },
- { name: "compound", values: [ {a:1, b:1.2}, {a:1, b:3.5}, {a:1, b:4.5}, {a:2, b:1.2}, {a:2, b:3.5}, {a:2, b:4.5} ], keyfield: "o", compound: true },
- { name: "oid_id", values: [ ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId() ], keyfield: "_id" },
- { name: "oid_other", values: [ ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId() ], keyfield: "o" },
+ 'use strict';
+
+ // Values have to be sorted - you must have exactly 6 values in each array
+ var types = [
+ {name: "string", values: ["allan", "bob", "eliot", "joe", "mark", "sara"], keyfield: "k"},
+ {name: "double", values: [1.2, 3.5, 4.5, 4.6, 6.7, 9.9], keyfield: "a"},
+ {
+ name: "date",
+ values: [
+ new Date(1000000),
+ new Date(2000000),
+ new Date(3000000),
+ new Date(4000000),
+ new Date(5000000),
+ new Date(6000000)
+ ],
+ keyfield: "a"
+ },
+ {
+ name: "string_id",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "_id"
+ },
+ {
+ name: "embedded 1",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "a.b"
+ },
+ {
+ name: "embedded 2",
+ values: ["allan", "bob", "eliot", "joe", "mark", "sara"],
+ keyfield: "a.b.c"
+ },
+ {
+ name: "object",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o"
+ },
+ {
+ name: "compound",
+ values: [
+ {a: 1, b: 1.2},
+ {a: 1, b: 3.5},
+ {a: 1, b: 4.5},
+ {a: 2, b: 1.2},
+ {a: 2, b: 3.5},
+ {a: 2, b: 4.5}
+ ],
+ keyfield: "o",
+ compound: true
+ },
+ {
+ name: "oid_id",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "_id"
+ },
+ {
+ name: "oid_other",
+ values: [ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId(), ObjectId()],
+ keyfield: "o"
+ },
];
-var s = new ShardingTest({ name: "key_many", shards: 2 });
+ var s = new ShardingTest({name: "key_many", shards: 2});
-assert.commandWorked(s.s0.adminCommand({ enableSharding: 'test' }));
-s.ensurePrimaryShard('test', 'shard0001');
+ assert.commandWorked(s.s0.adminCommand({enableSharding: 'test'}));
+ s.ensurePrimaryShard('test', 'shard0001');
-var db = s.getDB('test');
-var primary = s.getPrimaryShard("test").getDB("test");
-var secondary = s.getOther(primary).getDB("test");
+ var db = s.getDB('test');
+ var primary = s.getPrimaryShard("test").getDB("test");
+ var secondary = s.getOther(primary).getDB("test");
-var curT;
+ var curT;
-function makeObjectDotted(v) {
- var o = {};
- if (curT.compound) {
- var prefix = curT.keyfield + '.';
- if (typeof(v) == 'object') {
- for (var key in v)
- o[prefix + key] = v[key];
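+    // Build a flat document that addresses the shard key by its dotted path (e.g. {"a.b": v});
+    // compound keys expand to one dotted field per component.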
+ function makeObjectDotted(v) {
+ var o = {};
+ if (curT.compound) {
+ var prefix = curT.keyfield + '.';
+ if (typeof(v) == 'object') {
+ for (var key in v)
+ o[prefix + key] = v[key];
+ } else {
+ for (var key in curT.values[0])
+ o[prefix + key] = v;
+ }
} else {
- for (var key in curT.values[0])
- o[prefix + key] = v;
+ o[curT.keyfield] = v;
}
- } else {
- o[curT.keyfield] = v;
+ return o;
}
- return o;
-}
-function makeObject(v) {
- var o = {};
- var p = o;
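+    // Build the nested form of the shard key document (e.g. {a: {b: v}}), the inverse of
+    // makeObjectDotted.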
+ function makeObject(v) {
+ var o = {};
+ var p = o;
- var keys = curT.keyfield.split('.');
- for(var i=0; i<keys.length-1; i++) {
- p[keys[i]] = {};
- p = p[keys[i]];
- }
-
- p[keys[i]] = v;
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length - 1; i++) {
+ p[keys[i]] = {};
+ p = p[keys[i]];
+ }
- return o;
-}
+ p[keys[i]] = v;
-function makeInQuery() {
- if (curT.compound) {
- // cheating a bit...
- return {'o.a': { $in: [1, 2] }};
- } else {
- return makeObjectDotted({$in: curT.values});
+ return o;
}
-}
-function getKey(o) {
- var keys = curT.keyfield.split('.');
- for(var i = 0; i < keys.length; i++) {
- o = o[keys[i]];
+ function makeInQuery() {
+ if (curT.compound) {
+ // cheating a bit...
+ return {
+ 'o.a': {$in: [1, 2]}
+ };
+ } else {
+ return makeObjectDotted({$in: curT.values});
+ }
}
- return o;
-}
-
-Random.setRandomSeed();
-for (var i = 0; i < types.length; i++) {
- curT = types[i];
-
- print("\n\n#### Now Testing " + curT.name + " ####\n\n");
-
- var shortName = "foo_" + curT.name;
- var longName = "test." + shortName;
-
- var c = db[shortName];
- s.adminCommand({ shardcollection: longName, key: makeObjectDotted(1) });
-
- assert.eq(1, s.config.chunks.find({ ns: longName }).count(), curT.name + " sanity check A");
-
- var unsorted = Array.shuffle(Object.extend([], curT.values));
- c.insert(makeObject(unsorted[0]));
- for (var x = 1; x < unsorted.length; x++) {
- c.save(makeObject(unsorted[x]));
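+    // Walk the dotted key path to pull the shard key value back out of a document.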
+ function getKey(o) {
+ var keys = curT.keyfield.split('.');
+ for (var i = 0; i < keys.length; i++) {
+ o = o[keys[i]];
+ }
+ return o;
}
- assert.eq(6, c.find().count(), curT.name + " basic count");
+ Random.setRandomSeed();
- s.adminCommand({ split: longName, middle: makeObjectDotted(curT.values[0]) });
- s.adminCommand({ split: longName, middle: makeObjectDotted(curT.values[2]) });
- s.adminCommand({ split: longName, middle: makeObjectDotted(curT.values[5]) });
+ for (var i = 0; i < types.length; i++) {
+ curT = types[i];
- s.adminCommand({ movechunk: longName,
- find: makeObjectDotted(curT.values[2]),
- to: secondary.getMongo().name,
- _waitForDelete: true });
+ print("\n\n#### Now Testing " + curT.name + " ####\n\n");
- s.printChunks();
+ var shortName = "foo_" + curT.name;
+ var longName = "test." + shortName;
- assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
- assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
+ var c = db[shortName];
+ s.adminCommand({shardcollection: longName, key: makeObjectDotted(1)});
- assert.eq(6, c.find().toArray().length, curT.name + " total count");
- assert.eq(6, c.find().sort(makeObjectDotted(1)).toArray().length, curT.name + " total count sorted");
+ assert.eq(1, s.config.chunks.find({ns: longName}).count(), curT.name + " sanity check A");
- assert.eq(6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
-
- assert.eq(2, c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count(), curT.name + " $or count()");
- assert.eq(2, c.find({$or:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount(), curT.name + " $or itcount()");
- assert.eq(4, c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).count(), curT.name + " $nor count()");
- assert.eq(4, c.find({$nor:[makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]}).itcount(), curT.name + " $nor itcount()");
-
- var stats = c.stats();
- printjson(stats);
- assert.eq(6, stats.count, curT.name + " total count with stats()");
+ var unsorted = Array.shuffle(Object.extend([], curT.values));
+ c.insert(makeObject(unsorted[0]));
+ for (var x = 1; x < unsorted.length; x++) {
+ c.save(makeObject(unsorted[x]));
+ }
- var count = 0;
- for (var shard in stats.shards) {
- count += stats.shards[shard].count;
+ assert.eq(6, c.find().count(), curT.name + " basic count");
+
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[0])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[2])});
+ s.adminCommand({split: longName, middle: makeObjectDotted(curT.values[5])});
+
+ s.adminCommand({
+ movechunk: longName,
+ find: makeObjectDotted(curT.values[2]),
+ to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ s.printChunks();
+
+ assert.eq(3, primary[shortName].find().toArray().length, curT.name + " primary count");
+ assert.eq(3, secondary[shortName].find().toArray().length, curT.name + " secondary count");
+
+ assert.eq(6, c.find().toArray().length, curT.name + " total count");
+ assert.eq(6,
+ c.find().sort(makeObjectDotted(1)).toArray().length,
+ curT.name + " total count sorted");
+
+ assert.eq(
+ 6, c.find().sort(makeObjectDotted(1)).count(), curT.name + " total count with count()");
+
+ assert.eq(
+ 2,
+ c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .count(),
+ curT.name + " $or count()");
+ assert.eq(
+ 2,
+ c.find({$or: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .itcount(),
+ curT.name + " $or itcount()");
+ assert.eq(
+ 4,
+ c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .count(),
+ curT.name + " $nor count()");
+ assert.eq(
+ 4,
+ c.find({$nor: [makeObjectDotted(curT.values[2]), makeObjectDotted(curT.values[4])]})
+ .itcount(),
+ curT.name + " $nor itcount()");
+
+ var stats = c.stats();
+ printjson(stats);
+ assert.eq(6, stats.count, curT.name + " total count with stats()");
+
+ var count = 0;
+ for (var shard in stats.shards) {
+ count += stats.shards[shard].count;
+ }
+ assert.eq(6, count, curT.name + " total count with stats() sum");
+
+ assert.eq(curT.values,
+ c.find().sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1");
+ assert.eq(curT.values,
+ c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey),
+ curT.name + " sort 1 - $in");
+ assert.eq(curT.values.reverse(),
+ c.find().sort(makeObjectDotted(-1)).toArray().map(getKey),
+ curT.name + " sort 2");
+
+ assert.eq(0, c.find({xx: 17}).sort({zz: 1}).count(), curT.name + " xx 0a ");
+ assert.eq(0, c.find({xx: 17}).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
+ assert.eq(0, c.find({xx: 17}).count(), curT.name + " xx 0c ");
+ assert.eq(0, c.find({xx: {$exists: true}}).count(), curT.name + " xx 1 ");
+
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}});
+ assert.eq(1, c.find({xx: {$exists: true}}).count(), curT.name + " xx 2 ");
+ assert.eq(curT.values[3], getKey(c.findOne({xx: 17})), curT.name + " xx 3 ");
+
+ assert.writeOK(
+ c.update(makeObjectDotted(curT.values[3]), {$set: {xx: 17}}, {upsert: true}));
+
+ assert.commandWorked(c.ensureIndex({_id: 1}, {unique: true}));
+
+ // multi update
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(17, mysum, curT.name + " multi update pre");
+
+ c.update({}, {$inc: {xx: 1}}, false, true);
+
+ var mysum = 0;
+ c.find().forEach(function(z) {
+ mysum += z.xx || 0;
+ });
+ assert.eq(23, mysum, curT.name + " multi update");
}
- assert.eq(6, count, curT.name + " total count with stats() sum");
-
- assert.eq(curT.values, c.find().sort(makeObjectDotted(1)).toArray().map(getKey), curT.name + " sort 1");
- assert.eq(curT.values, c.find(makeInQuery()).sort(makeObjectDotted(1)).toArray().map(getKey), curT.name + " sort 1 - $in");
- assert.eq(curT.values.reverse(), c.find().sort(makeObjectDotted(-1)).toArray().map(getKey), curT.name + " sort 2");
-
- assert.eq(0, c.find({ xx: 17 }).sort({ zz: 1 }).count(), curT.name + " xx 0a ");
- assert.eq(0, c.find({ xx: 17 }).sort(makeObjectDotted(1)).count(), curT.name + " xx 0b ");
- assert.eq(0, c.find({ xx: 17 }).count(), curT.name + " xx 0c ");
- assert.eq(0, c.find({ xx: { $exists: true } }).count(), curT.name + " xx 1 ");
-
- c.update(makeObjectDotted(curT.values[3]), { $set: { xx: 17 } });
- assert.eq(1, c.find({ xx: { $exists: true } }).count(), curT.name + " xx 2 ");
- assert.eq(curT.values[3], getKey(c.findOne({ xx: 17 })), curT.name + " xx 3 ");
-
- assert.writeOK(
- c.update(makeObjectDotted(curT.values[3]), { $set: { xx: 17 }}, { upsert: true }));
-
- assert.commandWorked(c.ensureIndex({ _id: 1 }, { unique: true }));
-
- // multi update
- var mysum = 0;
- c.find().forEach(function(z) {
- mysum += z.xx || 0;
- });
- assert.eq(17, mysum, curT.name + " multi update pre");
-
- c.update({}, { $inc: { xx: 1 } }, false, true);
-
- var mysum = 0;
- c.find().forEach(function(z) { mysum += z.xx || 0; });
- assert.eq(23, mysum, curT.name + " multi update");
-}
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js
index c5b2e88b694..414e056bf1f 100644
--- a/jstests/sharding/key_string.js
+++ b/jstests/sharding/key_string.js
@@ -1,51 +1,71 @@
(function() {
-var s = new ShardingTest({ name: "keystring", shards: 2 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { name : 1 } } );
-
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-seconday = s.getOther( primary ).getDB( "test" );
-
-assert.eq( 1 , s.config.chunks.count() , "sanity check A" );
-
-var db = s.getDB( "test" );
-
-db.foo.save( { name : "eliot" } );
-db.foo.save( { name : "sara" } );
-db.foo.save( { name : "bob" } );
-db.foo.save( { name : "joe" } );
-db.foo.save( { name : "mark" } );
-db.foo.save( { name : "allan" } );
-
-assert.eq( 6 , db.foo.find().count() , "basic count" );
-
-s.adminCommand({ split: "test.foo", middle: { name: "allan" }});
-s.adminCommand({ split: "test.foo", middle: { name: "sara" }});
-s.adminCommand({ split: "test.foo", middle: { name: "eliot" }});
-
-s.adminCommand( { movechunk : "test.foo" , find : { name : "eliot" } , to : seconday.getMongo().name, _waitForDelete : true } );
-
-s.printChunks();
-
-assert.eq( 3 , primary.foo.find().toArray().length , "primary count" );
-assert.eq( 3 , seconday.foo.find().toArray().length , "secondary count" );
-
-assert.eq( 6 , db.foo.find().toArray().length , "total count" );
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).toArray().length , "total count sorted" );
-
-assert.eq( 6 , db.foo.find().sort( { name : 1 } ).count() , "total count with count()" );
-
-assert.eq( "allan,bob,eliot,joe,mark,sara" , db.foo.find().sort( { name : 1 } ).toArray().map( function(z){ return z.name; } ) , "sort 1" );
-assert.eq( "sara,mark,joe,eliot,bob,allan" , db.foo.find().sort( { name : -1 } ).toArray().map( function(z){ return z.name; } ) , "sort 2" );
-
-// make sure we can't foce a split on an extreme key
-// [allan->joe)
-assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "allan" } } ); } );
-assert.throws( function(){ s.adminCommand( { split : "test.foo" , middle : { name : "eliot" } } ); } );
-
-s.stop();
+ var s = new ShardingTest({name: "keystring", shards: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {name: 1}});
+
+ primary = s.getPrimaryShard("test").getDB("test");
+    secondary = s.getOther(primary).getDB("test");
+
+ assert.eq(1, s.config.chunks.count(), "sanity check A");
+
+ var db = s.getDB("test");
+
+ db.foo.save({name: "eliot"});
+ db.foo.save({name: "sara"});
+ db.foo.save({name: "bob"});
+ db.foo.save({name: "joe"});
+ db.foo.save({name: "mark"});
+ db.foo.save({name: "allan"});
+
+ assert.eq(6, db.foo.find().count(), "basic count");
+
+ s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+ s.adminCommand({split: "test.foo", middle: {name: "sara"}});
+ s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {name: "eliot"},
+        to: secondary.getMongo().name,
+ _waitForDelete: true
+ });
+
+ s.printChunks();
+
+ assert.eq(3, primary.foo.find().toArray().length, "primary count");
+    assert.eq(3, secondary.foo.find().toArray().length, "secondary count");
+
+ assert.eq(6, db.foo.find().toArray().length, "total count");
+ assert.eq(6, db.foo.find().sort({name: 1}).toArray().length, "total count sorted");
+
+ assert.eq(6, db.foo.find().sort({name: 1}).count(), "total count with count()");
+
+ assert.eq("allan,bob,eliot,joe,mark,sara",
+ db.foo.find().sort({name: 1}).toArray().map(function(z) {
+ return z.name;
+ }),
+ "sort 1");
+ assert.eq("sara,mark,joe,eliot,bob,allan",
+ db.foo.find()
+ .sort({name: -1})
+ .toArray()
+ .map(function(z) {
+ return z.name;
+ }),
+ "sort 2");
+
+    // make sure we can't force a split on an extreme key
+ // [allan->joe)
+ assert.throws(function() {
+ s.adminCommand({split: "test.foo", middle: {name: "allan"}});
+ });
+ assert.throws(function() {
+ s.adminCommand({split: "test.foo", middle: {name: "eliot"}});
+ });
+
+ s.stop();
})();
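
The two assert.throws calls above work because a requested middle key equal to a chunk's minimum bound would produce no new chunk, so the split command errors out. A minimal sketch of inspecting the boundaries before picking a split point, assuming db is a mongos connection to the cluster built above (the snippet is illustrative and not part of the test):

    // List chunk ranges for test.foo; a value equal to an existing 'min'
    // (such as {name: "allan"} above) cannot be used as a split point.
    db.getSiblingDB("config").chunks.find({ns: "test.foo"}).sort({min: 1}).forEach(function(c) {
        print("chunk [" + tojson(c.min) + ", " + tojson(c.max) + ") on " + c.shard);
    });
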
diff --git a/jstests/sharding/lagged_config_secondary.js b/jstests/sharding/lagged_config_secondary.js
index a2756ad7e70..5c28f79f24f 100644
--- a/jstests/sharding/lagged_config_secondary.js
+++ b/jstests/sharding/lagged_config_secondary.js
@@ -2,37 +2,36 @@
* Test that mongos times out when the config server replica set only contains nodes that
* are behind the majority opTime.
*/
-(function(){
-var st = new ShardingTest({ shards: 1 });
+(function() {
+ var st = new ShardingTest({shards: 1});
-var configSecondaryList = st.configRS.getSecondaries();
-var configSecondaryToKill = configSecondaryList[0];
-var delayedConfigSecondary = configSecondaryList[1];
+ var configSecondaryList = st.configRS.getSecondaries();
+ var configSecondaryToKill = configSecondaryList[0];
+ var delayedConfigSecondary = configSecondaryList[1];
-delayedConfigSecondary.getDB('admin').adminCommand({ configureFailPoint: 'rsSyncApplyStop',
- mode: 'alwaysOn' });
+ delayedConfigSecondary.getDB('admin')
+ .adminCommand({configureFailPoint: 'rsSyncApplyStop', mode: 'alwaysOn'});
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
-testDB.user.insert({ _id: 1 });
+ testDB.user.insert({_id: 1});
-st.configRS.stopMaster();
-MongoRunner.stopMongod(configSecondaryToKill.port);
+ st.configRS.stopMaster();
+ MongoRunner.stopMongod(configSecondaryToKill.port);
-// Clears all cached info so mongos will be forced to query from the config.
-st.s.adminCommand({ flushRouterConfig: 1 });
+ // Clears all cached info so mongos will be forced to query from the config.
+ st.s.adminCommand({flushRouterConfig: 1});
-var exception = assert.throws(function() {
- testDB.user.findOne();
-});
+ var exception = assert.throws(function() {
+ testDB.user.findOne();
+ });
-assert.eq(ErrorCodes.ExceededTimeLimit, exception.code);
+ assert.eq(ErrorCodes.ExceededTimeLimit, exception.code);
-var msg = 'Command on database config timed out waiting for read concern to be satisfied.';
-assert.soon(
- function() {
+ var msg = 'Command on database config timed out waiting for read concern to be satisfied.';
+ assert.soon(function() {
var logMessages =
assert.commandWorked(delayedConfigSecondary.adminCommand({getLog: 'global'})).log;
for (var i = 0; i < logMessages.length; i++) {
@@ -41,12 +40,8 @@ assert.soon(
}
}
return false;
- },
- 'Did not see any log entries containing the following message: ' + msg,
- 60000,
- 300
-);
+ }, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
-st.stop();
+ st.stop();
}());
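
The rsSyncApplyStop fail point is what keeps the surviving config secondary behind the majority opTime while the other nodes are taken down. A small sketch of the generic fail point toggle pattern used above, assuming conn is a direct connection to the mongod being manipulated (the helper name is ours, not part of the test):

    // Hypothetical wrapper around the configureFailPoint admin command.
    function setFailPoint(conn, name, mode) {
        assert.commandWorked(
            conn.getDB('admin').adminCommand({configureFailPoint: name, mode: mode}));
    }
    // setFailPoint(delayedConfigSecondary, 'rsSyncApplyStop', 'alwaysOn');  // pause oplog application
    // ... exercise the lagged config server scenario ...
    // setFailPoint(delayedConfigSecondary, 'rsSyncApplyStop', 'off');       // let replication catch up
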
diff --git a/jstests/sharding/large_chunk.js b/jstests/sharding/large_chunk.js
index ae02733395b..3318142ecac 100644
--- a/jstests/sharding/large_chunk.js
+++ b/jstests/sharding/large_chunk.js
@@ -1,65 +1,66 @@
// Where we test operations dealing with large chunks
(function() {
-// Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
-// Note that early splitting will start with a 1/4 of max size currently.
-var s = new ShardingTest({ name: 'large_chunk',
- shards: 2,
- other: { chunkSize: 1024 } });
+ // Starts a new sharding environment limiting the chunksize to 1GB (highest value allowed).
+ // Note that early splitting will start with a 1/4 of max size currently.
+ var s = new ShardingTest({name: 'large_chunk', shards: 2, other: {chunkSize: 1024}});
-// take the balancer out of the equation
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
-s.config.settings.find().forEach(printjson);
+ // take the balancer out of the equation
+ s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
+ s.config.settings.find().forEach(printjson);
-db = s.getDB( "test" );
+ db = s.getDB("test");
-//
-// Step 1 - Test moving a large chunk
-//
+ //
+ // Step 1 - Test moving a large chunk
+ //
-// Turn on sharding on the 'test.foo' collection and generate a large chunk
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
+ // Turn on sharding on the 'test.foo' collection and generate a large chunk
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 400 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (400 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.eq( 1 , s.config.chunks.count() , "step 1 - need one large chunk" );
+ assert.eq(1, s.config.chunks.count(), "step 1 - need one large chunk");
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+ primary = s.getPrimaryShard("test").getDB("test");
+ secondary = s.getOther(primary).getDB("test");
-// Make sure that we don't move that chunk if it goes past what we consider the maximum chunk size
-print("Checkpoint 1a");
-max = 200 * 1024 * 1024;
-assert.throws(function() {
- s.adminCommand({ movechunk: "test.foo",
- find: { _id: 1 },
- to: secondary.getMongo().name,
- maxChunkSizeBytes: max });
+ // Make sure that we don't move that chunk if it goes past what we consider the maximum chunk
+ // size
+ print("Checkpoint 1a");
+ max = 200 * 1024 * 1024;
+ assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {_id: 1},
+ to: secondary.getMongo().name,
+ maxChunkSizeBytes: max
+ });
});
-// Move the chunk
-print("checkpoint 1b");
-before = s.config.chunks.find().toArray();
-s.adminCommand( { movechunk : "test.foo" , find : { _id : 1 } , to : secondary.getMongo().name } );
-after = s.config.chunks.find().toArray();
-assert.neq( before[0].shard , after[0].shard , "move chunk did not work" );
+ // Move the chunk
+ print("checkpoint 1b");
+ before = s.config.chunks.find().toArray();
+ s.adminCommand({movechunk: "test.foo", find: {_id: 1}, to: secondary.getMongo().name});
+ after = s.config.chunks.find().toArray();
+ assert.neq(before[0].shard, after[0].shard, "move chunk did not work");
-s.config.changelog.find().forEach( printjson );
+ s.config.changelog.find().forEach(printjson);
-s.stop();
+ s.stop();
})();
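
The test raises the chunk size to the 1 GB maximum through the ShardingTest options and then stops the balancer by writing to config.settings. A short sketch of reading and changing the cluster-wide chunk size the same way, assuming s is the ShardingTest above (the "chunksize" document may not exist until a value has been set):

    // The chunk size is stored in megabytes in the config database's settings collection.
    printjson(s.config.settings.findOne({_id: "chunksize"}));
    // Upsert a new value; 1024 MB is the largest value the server accepts.
    s.config.settings.update({_id: "chunksize"}, {$set: {value: 1024}}, true);
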
diff --git a/jstests/sharding/large_skip_one_shard.js b/jstests/sharding/large_skip_one_shard.js
index 49e6551dec0..99c73eb99b3 100644
--- a/jstests/sharding/large_skip_one_shard.js
+++ b/jstests/sharding/large_skip_one_shard.js
@@ -1,35 +1,30 @@
/**
* Tests that a sharded query targeted to a single shard will use passed-in skip.
*/
-var st = new ShardingTest({ shards : 2, mongos : 1});
+var st = new ShardingTest({shards: 2, mongos: 1});
var mongos = st.s0;
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+var shards = mongos.getDB("config").shards.find().toArray();
-var admin = mongos.getDB( "admin" );
-var collSharded = mongos.getCollection( "testdb.collSharded" );
-var collUnSharded = mongos.getCollection( "testdb.collUnSharded" );
+var admin = mongos.getDB("admin");
+var collSharded = mongos.getCollection("testdb.collSharded");
+var collUnSharded = mongos.getCollection("testdb.collUnSharded");
// Set up a sharded and unsharded collection
-assert( admin.runCommand({ enableSharding : collSharded.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : collSharded.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : collSharded + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : collSharded + "", middle : { _id : 0 } }).ok );
-assert( admin.runCommand({ moveChunk : collSharded + "",
- find : { _id : 0 },
- to : shards[1]._id }).ok );
-
-function testSelectWithSkip(coll){
+assert(admin.runCommand({enableSharding: collSharded.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: collSharded.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: collSharded + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: collSharded + "", middle: {_id: 0}}).ok);
+assert(admin.runCommand({moveChunk: collSharded + "", find: {_id: 0}, to: shards[1]._id}).ok);
+function testSelectWithSkip(coll) {
for (var i = -100; i < 100; i++) {
- assert.writeOK(coll.insert({ _id : i }));
+ assert.writeOK(coll.insert({_id: i}));
}
// Run a query which only requires 5 results from a single shard
- var explain = coll.find({ _id : { $gt : 1 }}).sort({ _id : 1 })
- .skip(90)
- .limit(5)
- .explain("executionStats");
+ var explain =
+ coll.find({_id: {$gt: 1}}).sort({_id: 1}).skip(90).limit(5).explain("executionStats");
assert.lt(explain.executionStats.nReturned, 90);
}
diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js
index 5e7f1c32ab3..055b5c8b788 100644
--- a/jstests/sharding/limit_push.js
+++ b/jstests/sharding/limit_push.js
@@ -2,53 +2,61 @@
// See: http://jira.mongodb.org/browse/SERVER-1896
(function() {
-var s = new ShardingTest({ name: "limit_push", shards: 2, mongos: 1 });
-var db = s.getDB( "test" );
-
-// Create some data
-for (i=0; i < 100; i++) { db.limit_push.insert({ _id : i, x: i}); }
-db.limit_push.ensureIndex( { x : 1 } );
-assert.eq( 100 , db.limit_push.find().length() , "Incorrect number of documents" );
-
-// Shard the collection
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.limit_push" , key : { x : 1 } } );
-
-// Now split the and move the data between the shards
-s.adminCommand( { split : "test.limit_push", middle : { x : 50 }} );
-s.adminCommand( { moveChunk: "test.limit_push",
- find : { x : 51},
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true });
-
-// Check that the chunck have split correctly
-assert.eq( 2 , s.config.chunks.count() , "wrong number of chunks");
-
-// The query is asking for the maximum value below a given value
-// db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
-q = { x : { $lt : 60} };
-
-// Make sure the basic queries are correct
-assert.eq( 60 , db.limit_push.find( q ).count() , "Did not find 60 documents" );
-//rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
-//assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
-
-// Now make sure that the explain shos that each shard is returning a single document as indicated
-// by the "n" element for each shard
-exp = db.limit_push.find( q ).sort( { x:-1} ).limit(1).explain("executionStats");
-printjson( exp );
-
-var execStages = exp.executionStats.executionStages;
-assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
-
-var k = 0;
-for (var j in execStages.shards) {
- assert.eq( 1 , execStages.shards[j].executionStages.nReturned,
- "'n' is not 1 from shard000" + k.toString());
- k++;
-}
-
-s.stop();
+ var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1});
+ var db = s.getDB("test");
+
+ // Create some data
+ for (i = 0; i < 100; i++) {
+ db.limit_push.insert({_id: i, x: i});
+ }
+ db.limit_push.ensureIndex({x: 1});
+ assert.eq(100, db.limit_push.find().length(), "Incorrect number of documents");
+
+ // Shard the collection
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.limit_push", key: {x: 1}});
+
+    // Now split and move the data between the shards
+ s.adminCommand({split: "test.limit_push", middle: {x: 50}});
+ s.adminCommand({
+ moveChunk: "test.limit_push",
+ find: {x: 51},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
+
+    // Check that the chunks have split correctly
+ assert.eq(2, s.config.chunks.count(), "wrong number of chunks");
+
+ // The query is asking for the maximum value below a given value
+ // db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1)
+ q = {
+ x: {$lt: 60}
+ };
+
+ // Make sure the basic queries are correct
+ assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents");
+ // rs = db.limit_push.find( q ).sort( { x:-1} ).limit(1)
+ // assert.eq( rs , { _id : "1" , x : 59 } , "Did not find document with value 59" );
+
+    // Now make sure that the explain shows that each shard is returning a single document,
+    // as indicated by the "nReturned" element for each shard
+ exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats");
+ printjson(exp);
+
+ var execStages = exp.executionStats.executionStages;
+ assert.eq("SHARD_MERGE_SORT", execStages.stage, "Expected SHARD_MERGE_SORT as root stage");
+
+ var k = 0;
+ for (var j in execStages.shards) {
+ assert.eq(1,
+ execStages.shards[j].executionStages.nReturned,
+ "'n' is not 1 from shard000" + k.toString());
+ k++;
+ }
+
+ s.stop();
})();
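
The loop over execStages.shards confirms that the limit was pushed down: each shard runs the descending sort itself and hands back at most one document, and the SHARD_MERGE_SORT root stage in mongos only merge-sorts those single documents. Conceptually, each shard executes something like the following, assuming shardDB is a hypothetical direct connection to one shard's test database:

    // Per-shard work once the limit is pushed down: walk the {x: 1} index in
    // reverse for the descending sort and stop after the first matching document.
    shardDB.limit_push.find({x: {$lt: 60}}).sort({x: -1}).limit(1).toArray();
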
diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js
index 240081d642d..f6281a2b025 100644
--- a/jstests/sharding/listDatabases.js
+++ b/jstests/sharding/listDatabases.js
@@ -1,13 +1,13 @@
// tests that listDatabases doesn't show config db on a shard, even if it is there
-var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize:1}});
+var test = new ShardingTest({shards: 1, mongos: 1, other: {chunksize: 1}});
var mongos = test.s0;
var mongod = test.shard0;
-//grab the config db instance by name
-var getDBSection = function (dbsArray, dbToFind) {
- for(var pos in dbsArray) {
+// grab the config db instance by name
+var getDBSection = function(dbsArray, dbToFind) {
+ for (var pos in dbsArray) {
if (dbsArray[pos].name && dbsArray[pos].name === dbToFind)
return dbsArray[pos];
}
@@ -16,16 +16,16 @@ var getDBSection = function (dbsArray, dbToFind) {
var dbInConfigEntryCheck = function(dbEntry) {
assert.neq(null, dbEntry);
- assert(!dbEntry.shards); // db should not be in shard.
+ assert(!dbEntry.shards); // db should not be in shard.
assert.neq(null, dbEntry.sizeOnDisk);
assert.eq(false, dbEntry.empty);
};
-assert.writeOK(mongos.getDB("blah").foo.insert({ _id: 1 }));
-assert.writeOK(mongos.getDB("foo").foo.insert({ _id: 1 }));
-assert.writeOK(mongos.getDB("raw").foo.insert({ _id: 1 }));
+assert.writeOK(mongos.getDB("blah").foo.insert({_id: 1}));
+assert.writeOK(mongos.getDB("foo").foo.insert({_id: 1}));
+assert.writeOK(mongos.getDB("raw").foo.insert({_id: 1}));
-//verify that the config db is not on a shard
+// verify that the config db is not on a shard
var res = mongos.adminCommand("listDatabases");
var dbArray = res.databases;
dbInConfigEntryCheck(getDBSection(dbArray, "config"));
@@ -39,26 +39,26 @@ var localSection = getDBSection(dbArray, 'local');
assert(!localSection);
// add doc in admin db on the config server.
-assert.writeOK(mongos.getDB('admin').test.insert({ _id: 1 }));
+assert.writeOK(mongos.getDB('admin').test.insert({_id: 1}));
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
dbInConfigEntryCheck(getDBSection(dbArray, "config"));
dbInConfigEntryCheck(getDBSection(dbArray, 'admin'));
-//add doc in config/admin db on the shard
-mongod.getDB("config").foo.insert({_id:1});
-mongod.getDB("admin").foo.insert({_id:1});
+// add doc in config/admin db on the shard
+mongod.getDB("config").foo.insert({_id: 1});
+mongod.getDB("admin").foo.insert({_id: 1});
-//add doc in admin db (via mongos)
-mongos.getDB("admin").foo.insert({_id:1});
+// add doc in admin db (via mongos)
+mongos.getDB("admin").foo.insert({_id: 1});
-//verify that the config db is not on a shard
+// verify that the config db is not on a shard
res = mongos.adminCommand("listDatabases");
dbArray = res.databases;
-//check config db
+// check config db
assert(getDBSection(dbArray, "config"), "config db not found! 2");
assert(!getDBSection(dbArray, "config").shards, "config db is on a shard! 2");
-//check admin db
+// check admin db
assert(getDBSection(dbArray, "admin"), "admin db not found! 2");
assert(!getDBSection(dbArray, "admin").shards, "admin db is on a shard! 2");
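
These assertions boil down to one behaviour: in the listDatabases output from mongos, the config and admin entries are not attributed to any shard (no 'shards' field), even when a shard holds local copies of those databases. A quick way to see the difference, assuming mongos and mongod as set up above:

    // Through mongos: the config/admin entries carry no 'shards' field.
    printjson(mongos.adminCommand("listDatabases").databases);
    // Directly against the shard: its local config and admin databases show up here instead.
    printjson(mongod.adminCommand("listDatabases").databases);
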
diff --git a/jstests/sharding/listshards.js b/jstests/sharding/listshards.js
index b4c87eda7ab..77ecb53695d 100644
--- a/jstests/sharding/listshards.js
+++ b/jstests/sharding/listshards.js
@@ -2,69 +2,67 @@
// Test the listShards command by adding stand-alone and replica-set shards to a cluster
//
(function() {
-'use strict';
+ 'use strict';
-function checkShardName(shardName, shardsArray) {
- var found = false;
- shardsArray.forEach(function(shardObj) {
- if (shardObj._id === shardName) {
- found = true;
- return;
- }
- });
- return found;
-}
+ function checkShardName(shardName, shardsArray) {
+ var found = false;
+ shardsArray.forEach(function(shardObj) {
+ if (shardObj._id === shardName) {
+ found = true;
+ return;
+ }
+ });
+ return found;
+ }
-var shardTest = new ShardingTest({ name: 'listShardsTest',
- shards: 1,
- mongos: 1,
- other: { useHostname: true } });
+ var shardTest = new ShardingTest(
+ {name: 'listShardsTest', shards: 1, mongos: 1, other: {useHostname: true}});
-var mongos = shardTest.s0;
-var res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-var shardsArray = res.shards;
-assert.eq(shardsArray.length, 1);
+ var mongos = shardTest.s0;
+ var res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ var shardsArray = res.shards;
+ assert.eq(shardsArray.length, 1);
-// add standalone mongod
-var standaloneShard = MongoRunner.runMongod({useHostName: true});
-res = shardTest.admin.runCommand({ addShard: standaloneShard.host, name: 'standalone' });
-assert.commandWorked(res, 'addShard command failed');
-res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-shardsArray = res.shards;
-assert.eq(shardsArray.length, 2);
-assert(checkShardName('standalone', shardsArray),
- 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
+ // add standalone mongod
+ var standaloneShard = MongoRunner.runMongod({useHostName: true});
+ res = shardTest.admin.runCommand({addShard: standaloneShard.host, name: 'standalone'});
+ assert.commandWorked(res, 'addShard command failed');
+ res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ shardsArray = res.shards;
+ assert.eq(shardsArray.length, 2);
+ assert(checkShardName('standalone', shardsArray),
+ 'listShards command didn\'t return standalone shard: ' + tojson(shardsArray));
-// add replica set named 'repl'
-var rs1 = new ReplSetTest({ name: 'repl', nodes: 1, useHostName: true});
-rs1.startSet();
-rs1.initiate();
-res = shardTest.admin.runCommand({ addShard: rs1.getURL()});
-assert.commandWorked(res, 'addShard command failed');
-res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-shardsArray = res.shards;
-assert.eq(shardsArray.length, 3);
-assert(checkShardName('repl', shardsArray),
- 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
+ // add replica set named 'repl'
+ var rs1 = new ReplSetTest({name: 'repl', nodes: 1, useHostName: true});
+ rs1.startSet();
+ rs1.initiate();
+ res = shardTest.admin.runCommand({addShard: rs1.getURL()});
+ assert.commandWorked(res, 'addShard command failed');
+ res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ shardsArray = res.shards;
+ assert.eq(shardsArray.length, 3);
+ assert(checkShardName('repl', shardsArray),
+ 'listShards command didn\'t return replica set shard: ' + tojson(shardsArray));
-// remove 'repl' shard
-assert.soon(function() {
- var res = shardTest.admin.runCommand({ removeShard: 'repl' });
- assert.commandWorked(res, 'removeShard command failed');
- return res.state === 'completed';
-}, 'failed to remove the replica set shard');
+ // remove 'repl' shard
+ assert.soon(function() {
+ var res = shardTest.admin.runCommand({removeShard: 'repl'});
+ assert.commandWorked(res, 'removeShard command failed');
+ return res.state === 'completed';
+ }, 'failed to remove the replica set shard');
-res = mongos.adminCommand('listShards');
-assert.commandWorked(res, 'listShards command failed');
-shardsArray = res.shards;
-assert.eq(shardsArray.length, 2);
-assert(!checkShardName('repl', shardsArray),
- 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
+ res = mongos.adminCommand('listShards');
+ assert.commandWorked(res, 'listShards command failed');
+ shardsArray = res.shards;
+ assert.eq(shardsArray.length, 2);
+ assert(!checkShardName('repl', shardsArray),
+ 'listShards command returned removed replica set shard: ' + tojson(shardsArray));
-rs1.stopSet();
-shardTest.stop();
+ rs1.stopSet();
+ shardTest.stop();
})();
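
The assert.soon loop works because removeShard is a multi-step handshake: the first call starts draining the shard, later calls report progress, and only the final call returns state 'completed'. A sketch of the intermediate responses, assuming admin is the mongos admin database (shardTest.admin above) and 'repl' is the shard being drained:

    var res = admin.runCommand({removeShard: 'repl'});
    printjson(res);  // first call: {state: 'started', ...} kicks off draining
    res = admin.runCommand({removeShard: 'repl'});
    printjson(res);  // while data remains: {state: 'ongoing', remaining: {...}, ...}
    // Keep calling until res.state === 'completed', which is exactly what assert.soon polls for.
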
diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js
index 857b33fad9e..448d40c5649 100644
--- a/jstests/sharding/localhostAuthBypass.js
+++ b/jstests/sharding/localhostAuthBypass.js
@@ -1,7 +1,7 @@
-//SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
+// SERVER-6591: Localhost authentication exception doesn't work right on sharded cluster
//
-//This test is to ensure that localhost authentication works correctly against a sharded
-//cluster whether they are hosted with "localhost" or a hostname.
+// This test is to ensure that localhost authentication works correctly against a sharded
+// cluster whether it is hosted with "localhost" or a hostname.
var replSetName = "replsets_server-6591";
var keyfile = "jstests/libs/key1";
@@ -15,7 +15,7 @@ var createUser = function(mongo) {
};
var addUsersToEachShard = function(st) {
- for(i = 0; i < numShards; i++) {
+ for (i = 0; i < numShards; i++) {
print("============ adding a user to shard " + i);
var d = st["shard" + i];
d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
@@ -23,23 +23,21 @@ var addUsersToEachShard = function(st) {
};
var addShard = function(st, shouldPass) {
- var m = MongoRunner.runMongod({ auth: "", keyFile: keyfile, useHostname: false });
- var res = st.getDB("admin").runCommand({ addShard: m.host });
+ var m = MongoRunner.runMongod({auth: "", keyFile: keyfile, useHostname: false});
+ var res = st.getDB("admin").runCommand({addShard: m.host});
if (shouldPass) {
assert.commandWorked(res, "Add shard");
- }
- else {
+ } else {
assert.commandFailed(res, "Add shard");
}
return m.port;
};
-
var findEmptyShard = function(st, ns) {
- var counts = st.chunkCounts( "foo" );
+ var counts = st.chunkCounts("foo");
- for(shard in counts){
- if(counts[shard] == 0) {
+ for (shard in counts) {
+ if (counts[shard] == 0) {
return shard;
}
}
@@ -52,18 +50,26 @@ var assertCannotRunCommands = function(mongo, st) {
// CRUD
var test = mongo.getDB("test");
- assert.throws( function() { test.system.users.findOne(); });
- assert.writeError(test.foo.save({ _id: 0 }));
- assert.throws( function() { test.foo.findOne({_id:0}); });
- assert.writeError(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeError(test.foo.remove({ _id: 0 }));
+ assert.throws(function() {
+ test.system.users.findOne();
+ });
+ assert.writeError(test.foo.save({_id: 0}));
+ assert.throws(function() {
+ test.foo.findOne({_id: 0});
+ });
+ assert.writeError(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeError(test.foo.remove({_id: 0}));
// Multi-shard
assert.throws(function() {
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" });
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
});
// Config
@@ -75,40 +81,46 @@ var assertCannotRunCommands = function(mongo, st) {
var res = mongo.getDB("admin").runCommand({
moveChunk: "test.foo",
find: {_id: 1},
- to: "shard0000" // Arbitrary shard.
+ to: "shard0000" // Arbitrary shard.
});
assert.commandFailedWithCode(res, authorizeErrorCode, "moveChunk");
- assert.commandFailedWithCode(mongo.getDB("test").copyDatabase("admin", "admin2"),
- authorizeErrorCode, "copyDatabase");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").copyDatabase("admin", "admin2"), authorizeErrorCode, "copyDatabase");
// Create collection
- assert.commandFailedWithCode(mongo.getDB("test").createCollection(
- "log", { capped: true, size: 5242880, max: 5000 } ),
- authorizeErrorCode, "createCollection");
+ assert.commandFailedWithCode(
+ mongo.getDB("test").createCollection("log", {capped: true, size: 5242880, max: 5000}),
+ authorizeErrorCode,
+ "createCollection");
// Set/Get system parameters
- var params = [{ param: "journalCommitInterval", val: 200 },
- { param: "logLevel", val: 2 },
- { param: "logUserIds", val: 1 },
- { param: "notablescan", val: 1 },
- { param: "quiet", val: 1 },
- { param: "replApplyBatchSize", val: 10 },
- { param: "replIndexPrefetch", val: "none" },
- { param: "syncdelay", val: 30 },
- { param: "traceExceptions", val: true },
- { param: "sslMode", val: "preferSSL" },
- { param: "clusterAuthMode", val: "sendX509" },
- { param: "userCacheInvalidationIntervalSecs", val: 300 }
- ];
+ var params = [
+ {param: "journalCommitInterval", val: 200},
+ {param: "logLevel", val: 2},
+ {param: "logUserIds", val: 1},
+ {param: "notablescan", val: 1},
+ {param: "quiet", val: 1},
+ {param: "replApplyBatchSize", val: 10},
+ {param: "replIndexPrefetch", val: "none"},
+ {param: "syncdelay", val: 30},
+ {param: "traceExceptions", val: true},
+ {param: "sslMode", val: "preferSSL"},
+ {param: "clusterAuthMode", val: "sendX509"},
+ {param: "userCacheInvalidationIntervalSecs", val: 300}
+ ];
params.forEach(function(p) {
- var cmd = { setParameter: 1 };
+ var cmd = {
+ setParameter: 1
+ };
cmd[p.param] = p.val;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "setParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "setParameter: " + p.param);
});
params.forEach(function(p) {
- var cmd = { getParameter: 1 };
+ var cmd = {
+ getParameter: 1
+ };
cmd[p.param] = 1;
- assert.commandFailedWithCode(mongo.getDB("admin").runCommand(cmd),
- authorizeErrorCode, "getParameter: "+p.param);
+ assert.commandFailedWithCode(
+ mongo.getDB("admin").runCommand(cmd), authorizeErrorCode, "getParameter: " + p.param);
});
};
@@ -121,27 +133,26 @@ var assertCanRunCommands = function(mongo, st) {
// this will throw if it fails
test.system.users.findOne();
- assert.writeOK(test.foo.save({ _id: 0 }));
- assert.writeOK(test.foo.update({ _id: 0 }, { $set: { x: 20 }}));
- assert.writeOK(test.foo.remove({ _id: 0 }));
+ assert.writeOK(test.foo.save({_id: 0}));
+ assert.writeOK(test.foo.update({_id: 0}, {$set: {x: 20}}));
+ assert.writeOK(test.foo.remove({_id: 0}));
// Multi-shard
test.foo.mapReduce(
- function() { emit(1, 1); },
- function(id, count) { return Array.sum(count); },
- { out: "other" }
- );
+ function() {
+ emit(1, 1);
+ },
+ function(id, count) {
+ return Array.sum(count);
+ },
+ {out: "other"});
// Config
// this will throw if it fails
mongo.getDB("config").shards.findOne();
to = findEmptyShard(st, "test.foo");
- var res = mongo.getDB("admin").runCommand({
- moveChunk: "test.foo",
- find: {_id: 1},
- to: to
- });
+ var res = mongo.getDB("admin").runCommand({moveChunk: "test.foo", find: {_id: 1}, to: to});
assert.commandWorked(res);
};
@@ -154,12 +165,12 @@ var setupSharding = function(shardingTest) {
var mongo = shardingTest.s;
print("============ enabling sharding on test.foo.");
- mongo.getDB("admin").runCommand({enableSharding : "test"});
+ mongo.getDB("admin").runCommand({enableSharding: "test"});
shardingTest.ensurePrimaryShard('test', 'shard0001');
- mongo.getDB("admin").runCommand({shardCollection : "test.foo", key : {_id : 1}});
+ mongo.getDB("admin").runCommand({shardCollection: "test.foo", key: {_id: 1}});
var test = mongo.getDB("test");
- for(i = 1; i < 20; i++) {
+ for (i = 1; i < 20; i++) {
test.foo.insert({_id: i});
}
};
@@ -167,13 +178,13 @@ var setupSharding = function(shardingTest) {
var start = function() {
return new ShardingTest({
auth: "",
- keyFile: keyfile,
- shards: numShards,
- chunksize: 1,
- other : {
- nopreallocj: 1,
- useHostname: false // Must use localhost to take advantage of the localhost auth bypass
- }
+ keyFile: keyfile,
+ shards: numShards,
+ chunksize: 1,
+ other: {
+ nopreallocj: 1,
+ useHostname: false // Must use localhost to take advantage of the localhost auth bypass
+ }
});
};
@@ -185,31 +196,25 @@ var shutdown = function(st) {
// ShardingTest.stop does not have a way to provide auth
// information. Therefore, we'll do this manually for now.
- for(i = 0; i < st._mongos.length; i++) {
+ for (i = 0; i < st._mongos.length; i++) {
var port = st["s" + i].port;
- MongoRunner.stopMongos(
- port,
- /*signal*/false,
- { auth : { user: username, pwd: password }}
- );
+ MongoRunner.stopMongos(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
}
- for(i = 0; i < st._connections.length; i++) {
+ for (i = 0; i < st._connections.length; i++) {
var port = st["shard" + i].port;
- MongoRunner.stopMongod(
- port,
- /*signal*/false,
- { auth : { user: username, pwd: password }}
- );
+ MongoRunner.stopMongod(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
}
- for(i = 0; i < st._configServers.length; i++) {
+ for (i = 0; i < st._configServers.length; i++) {
var c = st["config" + i].port;
- MongoRunner.stopMongod(
- port,
- /*signal*/false,
- { auth : { user: username, pwd: password }}
- );
+ MongoRunner.stopMongod(port,
+ /*signal*/ false,
+ {auth: {user: username, pwd: password}});
}
st.stop();
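
For context, the localhost exception under test works as follows: on an auth-enabled mongod that has no users yet, a connection over the loopback interface may run commands; as soon as the first user is created the exception closes. A minimal sketch against a hypothetical standalone mongod started with --auth, assuming conn is a localhost connection and reusing the test's username/password variables:

    var adminDB = conn.getDB("admin");
    // With no users defined, the localhost exception allows this first createUser:
    adminDB.createUser({user: username, pwd: password, roles: jsTest.adminUserRoles});
    // From this point on the exception no longer applies, so clients must authenticate:
    assert(adminDB.auth(username, password));
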
diff --git a/jstests/sharding/major_version_check.js b/jstests/sharding/major_version_check.js
index cef05411e0c..1b4e1906379 100644
--- a/jstests/sharding/major_version_check.js
+++ b/jstests/sharding/major_version_check.js
@@ -2,53 +2,52 @@
// Tests that only a correct major-version is needed to connect to a shard via mongos
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 1, mongos : 2 });
+ var st = new ShardingTest({shards: 1, mongos: 2});
-var mongos = st.s0;
-var staleMongos = st.s1;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var coll = mongos.getCollection( "foo.bar" );
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection("foo.bar");
-// Shard collection
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }));
+ // Shard collection
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-// Make sure our stale mongos is up-to-date with no splits
-staleMongos.getCollection( coll + "" ).findOne();
+ // Make sure our stale mongos is up-to-date with no splits
+ staleMongos.getCollection(coll + "").findOne();
-// Run one split
-assert.commandWorked(admin.runCommand({ split : coll + "", middle : { _id : 0 } }));
+ // Run one split
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: 0}}));
-// Make sure our stale mongos is not up-to-date with the split
-printjson( admin.runCommand({ getShardVersion : coll + "" }) );
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
+ // Make sure our stale mongos is not up-to-date with the split
+ printjson(admin.runCommand({getShardVersion: coll + ""}));
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-// Compare strings b/c timestamp comparison is a bit weird
-assert.eq( Timestamp( 1, 2 ),
- admin.runCommand({ getShardVersion : coll + "" }).version );
-assert.eq( Timestamp( 1, 0 ),
- staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
+ // Compare strings b/c timestamp comparison is a bit weird
+ assert.eq(Timestamp(1, 2), admin.runCommand({getShardVersion: coll + ""}).version);
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-// See if our stale mongos is required to catch up to run a findOne on an existing connection
-staleMongos.getCollection( coll + "" ).findOne();
+ // See if our stale mongos is required to catch up to run a findOne on an existing connection
+ staleMongos.getCollection(coll + "").findOne();
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-assert.eq( Timestamp( 1, 0 ),
- staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
-
-// See if our stale mongos is required to catch up to run a findOne on a new connection
-staleMongos = new Mongo( staleMongos.host );
-staleMongos.getCollection( coll + "" ).findOne();
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
-printjson( staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }) );
+ // See if our stale mongos is required to catch up to run a findOne on a new connection
+ staleMongos = new Mongo(staleMongos.host);
+ staleMongos.getCollection(coll + "").findOne();
-assert.eq( Timestamp( 1, 0 ),
- staleMongos.getDB( "admin" ).runCommand({ getShardVersion : coll + "" }).version );
+ printjson(staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}));
-st.stop();
+ assert.eq(Timestamp(1, 0),
+ staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version);
+
+ st.stop();
})();
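
The Timestamp comparisons encode the rule being tested: a chunk migration bumps the major (first) component of the shard version, while a split only bumps the minor component, so a mongos whose cached major version still matches is not forced to refresh. A sketch of reading both versions side by side, assuming admin, staleMongos, and coll as defined above:

    var freshVersion = admin.runCommand({getShardVersion: coll + ""}).version;
    var staleVersion = staleMongos.getDB("admin").runCommand({getShardVersion: coll + ""}).version;
    // e.g. Timestamp(1, 2) vs Timestamp(1, 0): same major version, so stale routing info is still usable.
    print("fresh: " + tojson(freshVersion) + ", stale: " + tojson(staleVersion));
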
diff --git a/jstests/sharding/mapReduce_inSharded.js b/jstests/sharding/mapReduce_inSharded.js
index ff1c76a3534..110be371ba9 100644
--- a/jstests/sharding/mapReduce_inSharded.js
+++ b/jstests/sharding/mapReduce_inSharded.js
@@ -6,27 +6,29 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// sharded src
var suffix = "InSharded";
@@ -34,26 +36,27 @@ var suffix = "InSharded";
var out = db.srcSharded.mapReduce(map, reduce, "mrBasic" + suffix);
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { inline: 1 } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB" } });
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcSharded",
- });
+});
verifyOutput(out);
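
The out actions exercised above differ only in how they treat an existing target collection. A compact sketch of the three non-inline actions, assuming db, map, and reduce as defined in the test and using a made-up target name:

    db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrTarget"}});  // drop the target, write fresh results
    db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrTarget"}});    // overwrite matching keys, keep the rest
    db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrTarget"}});   // re-reduce new results with existing docs
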
diff --git a/jstests/sharding/mapReduce_inSharded_outSharded.js b/jstests/sharding/mapReduce_inSharded_outSharded.js
index 1cfce046732..d1aba2599f0 100644
--- a/jstests/sharding/mapReduce_inSharded_outSharded.js
+++ b/jstests/sharding/mapReduce_inSharded_outSharded.js
@@ -6,51 +6,55 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// sharded src sharded dst
var suffix = "InShardedOutSharded";
-var out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, sharded: true } });
+var out =
+ db.srcSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix, sharded: true } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix, sharded: true } });
+out = db.srcSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcSharded.mapReduce(map, reduce, { out: { inline: 1 }});
+out = db.srcSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true } });
+out = db.srcSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/mapReduce_nonSharded.js b/jstests/sharding/mapReduce_nonSharded.js
index 4e36335047b..40fb098931b 100644
--- a/jstests/sharding/mapReduce_nonSharded.js
+++ b/jstests/sharding/mapReduce_nonSharded.js
@@ -6,27 +6,29 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// non-sharded in/out
var suffix = "";
@@ -34,26 +36,27 @@ var suffix = "";
out = db.srcNonSharded.mapReduce(map, reduce, "mrBasic" + suffix);
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { inline: 1 }});
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB" } });
+out = db.srcNonSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB"}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcNonSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/mapReduce_outSharded.js b/jstests/sharding/mapReduce_outSharded.js
index 5ab50c4c877..34cde2b63ef 100644
--- a/jstests/sharding/mapReduce_outSharded.js
+++ b/jstests/sharding/mapReduce_outSharded.js
@@ -6,51 +6,55 @@ var verifyOutput = function(out) {
assert.eq(out.counts.output, 512, "output count is wrong");
};
-var st = new ShardingTest({ shards : 2,
- verbose : 1,
- mongos : 1,
- other : { chunkSize: 1, enableBalancer: true }});
+var st = new ShardingTest(
+ {shards: 2, verbose: 1, mongos: 1, other: {chunkSize: 1, enableBalancer: true}});
-st.adminCommand( { enablesharding : "mrShard" } );
+st.adminCommand({enablesharding: "mrShard"});
st.ensurePrimaryShard('mrShard', 'shard0001');
-st.adminCommand( { shardcollection : "mrShard.srcSharded", key : { "_id" : 1 } } );
+st.adminCommand({shardcollection: "mrShard.srcSharded", key: {"_id": 1}});
-var db = st.getDB( "mrShard" );
+var db = st.getDB("mrShard");
var bulk = db.srcNonSharded.initializeUnorderedBulkOp();
for (j = 0; j < 100; j++) {
for (i = 0; i < 512; i++) {
- bulk.insert({ j: j, i: i });
+ bulk.insert({j: j, i: i});
}
}
assert.writeOK(bulk.execute());
-function map() { emit(this.i, 1); }
-function reduce(key, values) { return Array.sum(values); }
+function map() {
+ emit(this.i, 1);
+}
+function reduce(key, values) {
+ return Array.sum(values);
+}
// non sharded src sharded dst
var suffix = "OutSharded";
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, sharded: true } });
+out =
+ db.srcNonSharded.mapReduce(map, reduce, {out: {replace: "mrReplace" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { merge: "mrMerge" + suffix, sharded: true } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {merge: "mrMerge" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { reduce: "mrReduce" + suffix, sharded: true } });
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {reduce: "mrReduce" + suffix, sharded: true}});
verifyOutput(out);
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { inline: 1 }});
+out = db.srcNonSharded.mapReduce(map, reduce, {out: {inline: 1}});
verifyOutput(out);
assert(out.results != 'undefined', "no results for inline");
-out = db.srcNonSharded.mapReduce(map, reduce, { out: { replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true } });
+out = db.srcNonSharded.mapReduce(
+ map, reduce, {out: {replace: "mrReplace" + suffix, db: "mrShardOtherDB", sharded: true}});
verifyOutput(out);
out = db.runCommand({
- mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
+ mapReduce: "srcNonSharded", // use new name mapReduce rather than mapreduce
map: map,
reduce: reduce,
out: "mrBasic" + "srcNonSharded",
- });
+});
verifyOutput(out);
diff --git a/jstests/sharding/map_reduce_validation.js b/jstests/sharding/map_reduce_validation.js
index 436ff395ece..7ccbf6130ee 100644
--- a/jstests/sharding/map_reduce_validation.js
+++ b/jstests/sharding/map_reduce_validation.js
@@ -1,29 +1,49 @@
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
var testDB = st.s.getDB('test');
-var mapFunc = function() { emit(this.x, 1); };
-var reduceFunc = function(key, values) { return values.length; };
+var mapFunc = function() {
+ emit(this.x, 1);
+};
+var reduceFunc = function(key, values) {
+ return values.length;
+};
-assert.commandFailed(testDB.runCommand({ mapReduce: 'user',
- map: mapFunc,
- reduce: reduceFunc,
- out: { inline: 1, sharded: true }}));
+assert.commandFailed(testDB.runCommand(
+ {mapReduce: 'user', map: mapFunc, reduce: reduceFunc, out: {inline: 1, sharded: true}}));
testDB.bar.insert({i: 1});
-assert.commandFailed(testDB.runCommand({ mapReduce: 'bar',
- map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values);},
- out: { replace: "foo", db: "admin" }}));
+assert.commandFailed(testDB.runCommand({
+ mapReduce: 'bar',
+ map: function() {
+ emit(this.i, this.i * 3);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "foo", db: "admin"}
+}));
-assert.commandFailed(testDB.runCommand({ mapReduce: 'bar',
- map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values);},
- out: { replace: "foo", db: "config" }}));
+assert.commandFailed(testDB.runCommand({
+ mapReduce: 'bar',
+ map: function() {
+ emit(this.i, this.i * 3);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "foo", db: "config"}
+}));
-assert.commandWorked(testDB.runCommand({ mapReduce: 'bar',
- map: function() {emit(this.i, this.i*3);},
- reduce: function(key, values) {return Array.sum(values);},
- out: { replace: "foo", db: "test" }}));
+assert.commandWorked(testDB.runCommand({
+ mapReduce: 'bar',
+ map: function() {
+ emit(this.i, this.i * 3);
+ },
+ reduce: function(key, values) {
+ return Array.sum(values);
+ },
+ out: {replace: "foo", db: "test"}
+}));
st.stop();
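
The pattern above: writing mapReduce output into the admin or config database is rejected, combining inline with sharded output is rejected, and an ordinary database is accepted. When probing these rules interactively it helps to print the server's explanation, assuming testDB, mapFunc, and reduceFunc as above:

    var res = testDB.runCommand(
        {mapReduce: 'bar', map: mapFunc, reduce: reduceFunc, out: {replace: "foo", db: "admin"}});
    assert.eq(0, res.ok);  // rejected: mapReduce output may not target the admin database
    print(res.errmsg);     // the server's reason for refusing the output target
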
diff --git a/jstests/sharding/max_time_ms_sharded.js b/jstests/sharding/max_time_ms_sharded.js
index 1a000ea3dde..7194c98750c 100644
--- a/jstests/sharding/max_time_ms_sharded.js
+++ b/jstests/sharding/max_time_ms_sharded.js
@@ -5,213 +5,236 @@
// Note that mongos does not time out commands or query ops (which remains responsibility of mongod,
// pending development of an interrupt framework for mongos).
(function() {
-'use strict';
-
-var st = new ShardingTest({shards: 2});
-
-var mongos = st.s0;
-var shards = [st.shard0, st.shard1];
-var coll = mongos.getCollection("foo.bar");
-var admin = mongos.getDB("admin");
-var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
-var cursor;
-var res;
-
-// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces mongod to
-// throw if it receives an operation with a max time. See fail point declaration for complete
-// description.
-var configureMaxTimeAlwaysTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
-};
-
-// Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which prohibits mongod
-// from enforcing time limits. See fail point declaration for complete description.
-var configureMaxTimeNeverTimeOut = function(mode) {
- assert.commandWorked(shards[0].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
- assert.commandWorked(shards[1].getDB("admin").runCommand(
- {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
-};
-
-//
-// Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
-//
-assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
-admin.runCommand({movePrimary: coll.getDB().getName(),
- to: "shard0000"});
-assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(),
- key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0001"}));
-
-//
-// Insert 100 documents into sharded collection, such that each shard owns 50.
-//
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = -50; i < 50; i++) {
- bulk.insert({ _id: i });
-}
-assert.writeOK(bulk.execute());
-assert.eq(50, shards[0].getCollection(coll.getFullName()).count());
-assert.eq(50, shards[1].getCollection(coll.getFullName()).count());
-
-
-//
-// Test that mongos correctly forwards max time to shards for sharded queries. Uses
-// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
-//
-
-// Positive test.
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-cursor = coll.find();
-cursor.maxTimeMS(60*1000);
-assert.throws(function() { cursor.next(); },
- [],
- "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-
-// Negative test.
-configureMaxTimeAlwaysTimeOut("off");
-cursor = coll.find();
-cursor.maxTimeMS(60*1000);
-assert.doesNotThrow(function() { cursor.next(); },
- [],
- "expected query to not hit time limit in mongod");
-
-//
-// Test that mongos correctly times out max time sharded getmore operations. Uses
-// maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
-//
-// TODO: This is unimplemented. A test for this functionality should be written as
-// part of the work for SERVER-19410.
-//
-
-configureMaxTimeNeverTimeOut("alwaysOn");
-
-// Positive test. TODO: see above.
-
-// Negative test. ~10s operation, with a high (1-day) limit.
-cursor = coll.find({$where: function() { sleep(100); return true; }});
-cursor.batchSize(2);
-cursor.maxTimeMS(1000*60*60*24);
-assert.doesNotThrow(function() { cursor.next(); },
- [],
- "did not expect mongos to time out first batch of query");
-assert.doesNotThrow(function() { cursor.itcount(); },
- [],
- "did not expect getmore ops to hit the time limit");
-
-configureMaxTimeNeverTimeOut("off");
-
-//
-// Test that mongos correctly forwards max time to shards for sharded commands. Uses
-// maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
-//
-
-// Positive test for "validate".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("validate", {maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected validate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from validate, instead got: " + tojson(res));
-
-// Negative test for "validate".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60*1000}),
- "expected validate to not hit time limit in mongod");
-
-// Positive test for "count".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("count", {maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected count to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from count , instead got: " + tojson(res));
-
-// Negative test for "count".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60*1000}),
- "expected count to not hit time limit in mongod");
-
-// Positive test for "collStats".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("collStats", {maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected collStats to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from collStats, instead got: " + tojson(res));
-
-// Negative test for "collStats".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60*1000}),
- "expected collStats to not hit time limit in mongod");
-
-// Positive test for "mapReduce".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("mapReduce", {map: function() { emit(0, 0); },
- reduce: function(key, values) { return 0; },
- out: {inline: 1},
- maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected mapReduce to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from mapReduce, instead got: " + tojson(res));
-
-// Negative test for "mapReduce".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("mapReduce", {map: function() { emit(0, 0); },
- reduce: function(key, values) { return 0; },
- out: {inline: 1},
- maxTimeMS: 60*1000}),
- "expected mapReduce to not hit time limit in mongod");
-
-// Positive test for "aggregate".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = coll.runCommand("aggregate", {pipeline: [],
- maxTimeMS: 60*1000});
-assert.commandFailed(res,
- "expected aggregate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from aggregate , instead got: " + tojson(res));
-
-// Negative test for "aggregate".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(coll.runCommand("aggregate", {pipeline: [],
- maxTimeMS: 60*1000}),
- "expected aggregate to not hit time limit in mongod");
-
-// Positive test for "moveChunk".
-configureMaxTimeAlwaysTimeOut("alwaysOn");
-res = admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0000",
- maxTimeMS: 1000*60*60*24});
-assert.commandFailed(res,
- "expected moveChunk to fail in mongod due to maxTimeAlwaysTimeOut fail point");
-assert.eq(res["code"],
- exceededTimeLimit,
- "expected code " + exceededTimeLimit + " from moveChunk, instead got: " + tojson(res));
-
-// Negative test for "moveChunk".
-configureMaxTimeAlwaysTimeOut("off");
-assert.commandWorked(admin.runCommand({moveChunk: coll.getFullName(),
- find: {_id: 0},
- to: "shard0000",
- maxTimeMS: 1000*60*60*24}),
- "expected moveChunk to not hit time limit in mongod");
-
-// TODO Test additional commmands.
-
-st.stop();
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2});
+
+ var mongos = st.s0;
+ var shards = [st.shard0, st.shard1];
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
+ var exceededTimeLimit = 50; // ErrorCodes::ExceededTimeLimit
+ var cursor;
+ var res;
+
+    // Helper function to configure "maxTimeAlwaysTimeOut" fail point on shards, which forces
+    // mongod to throw if it receives an operation with a max time. See fail point declaration for
+    // complete description.
+ var configureMaxTimeAlwaysTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeAlwaysTimeOut", mode: mode}));
+ };
+
+    // Helper function to configure "maxTimeNeverTimeOut" fail point on shards, which prohibits
+    // mongod from enforcing time limits. See fail point declaration for complete description.
+ var configureMaxTimeNeverTimeOut = function(mode) {
+ assert.commandWorked(shards[0].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+ assert.commandWorked(shards[1].getDB("admin").runCommand(
+ {configureFailPoint: "maxTimeNeverTimeOut", mode: mode}));
+ };
+
+ //
+ // Pre-split collection: shard 0 takes {_id: {$lt: 0}}, shard 1 takes {_id: {$gte: 0}}.
+ //
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().getName()}));
+ admin.runCommand({movePrimary: coll.getDB().getName(), to: "shard0000"});
+ assert.commandWorked(admin.runCommand({shardCollection: coll.getFullName(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {_id: 0}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll.getFullName(), find: {_id: 0}, to: "shard0001"}));
+
+ //
+ // Insert 100 documents into sharded collection, such that each shard owns 50.
+ //
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = -50; i < 50; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.eq(50, shards[0].getCollection(coll.getFullName()).count());
+ assert.eq(50, shards[1].getCollection(coll.getFullName()).count());
+
+ //
+ // Test that mongos correctly forwards max time to shards for sharded queries. Uses
+ // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+ //
+
+ // Positive test.
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ cursor = coll.find();
+ cursor.maxTimeMS(60 * 1000);
+ assert.throws(function() {
+ cursor.next();
+ }, [], "expected query to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+
+ // Negative test.
+ configureMaxTimeAlwaysTimeOut("off");
+ cursor = coll.find();
+ cursor.maxTimeMS(60 * 1000);
+ assert.doesNotThrow(function() {
+ cursor.next();
+ }, [], "expected query to not hit time limit in mongod");
+
+ //
+ // Test that mongos correctly times out max time sharded getmore operations. Uses
+ // maxTimeNeverTimeOut to ensure mongod doesn't enforce a time limit.
+ //
+ // TODO: This is unimplemented. A test for this functionality should be written as
+ // part of the work for SERVER-19410.
+ //
+
+ configureMaxTimeNeverTimeOut("alwaysOn");
+
+ // Positive test. TODO: see above.
+
+ // Negative test. ~10s operation, with a high (1-day) limit.
+ cursor = coll.find({
+ $where: function() {
+ sleep(100);
+ return true;
+ }
+ });
+ cursor.batchSize(2);
+ cursor.maxTimeMS(1000 * 60 * 60 * 24);
+ assert.doesNotThrow(function() {
+ cursor.next();
+ }, [], "did not expect mongos to time out first batch of query");
+ assert.doesNotThrow(function() {
+ cursor.itcount();
+ }, [], "did not expect getmore ops to hit the time limit");
+
+ configureMaxTimeNeverTimeOut("off");
+
+ //
+ // Test that mongos correctly forwards max time to shards for sharded commands. Uses
+ // maxTimeAlwaysTimeOut to ensure mongod throws if it receives a max time.
+ //
+
+ // Positive test for "validate".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("validate", {maxTimeMS: 60 * 1000});
+ assert.commandFailed(
+ res, "expected validate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from validate, instead got: " + tojson(res));
+
+ // Negative test for "validate".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("validate", {maxTimeMS: 60 * 1000}),
+ "expected validate to not hit time limit in mongod");
+
+ // Positive test for "count".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("count", {maxTimeMS: 60 * 1000});
+ assert.commandFailed(res,
+ "expected count to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(res["code"],
+ exceededTimeLimit,
+              "expected code " + exceededTimeLimit + " from count, instead got: " + tojson(res));
+
+ // Negative test for "count".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("count", {maxTimeMS: 60 * 1000}),
+ "expected count to not hit time limit in mongod");
+
+ // Positive test for "collStats".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("collStats", {maxTimeMS: 60 * 1000});
+ assert.commandFailed(
+ res, "expected collStats to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from collStats, instead got: " + tojson(res));
+
+ // Negative test for "collStats".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("collStats", {maxTimeMS: 60 * 1000}),
+ "expected collStats to not hit time limit in mongod");
+
+ // Positive test for "mapReduce".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("mapReduce",
+ {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ });
+ assert.commandFailed(
+ res, "expected mapReduce to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from mapReduce, instead got: " + tojson(res));
+
+ // Negative test for "mapReduce".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("mapReduce",
+ {
+ map: function() {
+ emit(0, 0);
+ },
+ reduce: function(key, values) {
+ return 0;
+ },
+ out: {inline: 1},
+ maxTimeMS: 60 * 1000
+ }),
+ "expected mapReduce to not hit time limit in mongod");
+
+ // Positive test for "aggregate".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = coll.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000});
+ assert.commandFailed(
+ res, "expected aggregate to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+        "expected code " + exceededTimeLimit + " from aggregate, instead got: " + tojson(res));
+
+ // Negative test for "aggregate".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(coll.runCommand("aggregate", {pipeline: [], maxTimeMS: 60 * 1000}),
+ "expected aggregate to not hit time limit in mongod");
+
+ // Positive test for "moveChunk".
+ configureMaxTimeAlwaysTimeOut("alwaysOn");
+ res = admin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 0},
+ to: "shard0000",
+ maxTimeMS: 1000 * 60 * 60 * 24
+ });
+ assert.commandFailed(
+ res, "expected moveChunk to fail in mongod due to maxTimeAlwaysTimeOut fail point");
+ assert.eq(
+ res["code"],
+ exceededTimeLimit,
+ "expected code " + exceededTimeLimit + " from moveChunk, instead got: " + tojson(res));
+
+ // Negative test for "moveChunk".
+ configureMaxTimeAlwaysTimeOut("off");
+ assert.commandWorked(admin.runCommand({
+ moveChunk: coll.getFullName(),
+ find: {_id: 0},
+ to: "shard0000",
+ maxTimeMS: 1000 * 60 * 60 * 24
+ }),
+ "expected moveChunk to not hit time limit in mongod");
+
+    // TODO Test additional commands.
+
+ st.stop();
})();
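
Every positive test in this file follows the same recipe: enable the "maxTimeAlwaysTimeOut" fail point on each shard, issue a command through mongos with maxTimeMS set, and expect ErrorCodes::ExceededTimeLimit (code 50) in the reply. A minimal sketch of that recipe, assuming the same ShardingTest handles built above (the collection name is an illustrative stand-in):

    // Sketch of the positive-test pattern: force time-limit failures on the shards, then check
    // the error code that a maxTimeMS-bearing command reports through mongos.
    var sketchColl = st.s0.getCollection("foo.bar");  // stand-in for the test's coll handle
    [st.shard0, st.shard1].forEach(function(shard) {
        assert.commandWorked(shard.getDB("admin").runCommand(
            {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "alwaysOn"}));
    });
    var sketchRes = sketchColl.runCommand("count", {maxTimeMS: 60 * 1000});
    assert.commandFailed(sketchRes);
    assert.eq(50, sketchRes.code);  // ErrorCodes::ExceededTimeLimit
    [st.shard0, st.shard1].forEach(function(shard) {
        assert.commandWorked(shard.getDB("admin").runCommand(
            {configureFailPoint: "maxTimeAlwaysTimeOut", mode: "off"}));
    });
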
diff --git a/jstests/sharding/merge_chunks_basic.js b/jstests/sharding/merge_chunks_basic.js
index b8ad0040182..8cdcf05f61a 100644
--- a/jstests/sharding/merge_chunks_basic.js
+++ b/jstests/sharding/merge_chunks_basic.js
@@ -3,62 +3,58 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-var kDbName = 'db';
+ var kDbName = 'db';
-var shards = mongos.getCollection('config.shards').find().toArray();
+ var shards = mongos.getCollection('config.shards').find().toArray();
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
-var ns = kDbName + ".foo";
+ var ns = kDbName + ".foo";
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-st.ensurePrimaryShard(kDbName, shard0);
+ st.ensurePrimaryShard(kDbName, shard0);
-// Fail if invalid namespace.
-assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [ {a: -1}, {a: 1} ]}));
+ // Fail if invalid namespace.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: '', bounds: [{a: -1}, {a: 1}]}));
-// Fail if database does not exist.
-assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [ {a: -1}, {a: 1} ]}));
+ // Fail if database does not exist.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: 'a.b', bounds: [{a: -1}, {a: 1}]}));
-// Fail if collection is unsharded.
-assert.commandFailed(mongos.adminCommand({mergeChunks: kDbName + '.xxx',
- bounds: [ {a: -1}, {a: 1} ]}));
+ // Fail if collection is unsharded.
+ assert.commandFailed(
+ mongos.adminCommand({mergeChunks: kDbName + '.xxx', bounds: [{a: -1}, {a: 1}]}));
-// Errors if either bounds is not a valid shard key.
-assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
-assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
-assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
-assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
-assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
-assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
+    // Errors if either bound is not a valid shard key.
+ assert.eq(0, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {a: 1}}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
+ assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
+ assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
-
-assert.commandFailed(mongos.adminCommand({mergeChunks: ns,
- bounds: [ {x: -1}, {a: 1} ]}));
+ assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{x: -1}, {a: 1}]}));
+    // Fail if a bound uses the wrong shard key.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {x: 1}]}));
-// Fail if a wrong key.
-assert.commandFailed(mongos.adminCommand({mergeChunks: ns,
- bounds: [ {a: -1}, {x: 1} ]}));
+    // Fail if a bound is not an existing chunk boundary.
+ assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
-// Fail if chunks do not contain a bound.
-assert.commandFailed(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 10}]}));
+ // Validate metadata.
+ // There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
+ assert.eq(4, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.commandWorked(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
+ assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
+ assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
-// Validate metadata.
-// There are four chunks [{$minKey, -1}, {-1, 0}, {0, 1}, {1, $maxKey}]
-assert.eq(4, mongos.getDB('config').chunks.count({ns: ns}));
-assert.commandWorked(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
-assert.eq(3, mongos.getDB('config').chunks.count({ns: ns}));
-assert.eq(1, mongos.getDB('config').chunks.count({ns: ns, min: {a: -1}, max: {a: 1}}));
-
-st.stop();
+ st.stop();
})();
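
Taken together, the assertions above pin down the mergeChunks contract: both bounds must be existing chunk boundaries, they must enclose at least two contiguous chunks, and (as the next test in this patch also checks) those chunks must live on a single shard. The passing case, isolated as a small sketch that reuses the mongos/ns names from this test:

    // Sketch: {a: -1} and {a: 1} are real boundaries enclosing the contiguous chunks [-1, 0) and
    // [0, 1), so this merge satisfies every rule asserted above.
    assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: -1}}));
    assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 0}}));
    assert.commandWorked(mongos.adminCommand({split: ns, middle: {a: 1}}));
    assert.commandWorked(mongos.adminCommand({mergeChunks: ns, bounds: [{a: -1}, {a: 1}]}));
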
diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js
index 2a853bbd1d9..6b00a6532ea 100644
--- a/jstests/sharding/merge_chunks_test.js
+++ b/jstests/sharding/merge_chunks_test.js
@@ -2,121 +2,105 @@
// Tests that merging chunks via mongos works/doesn't work with different chunk configurations
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 2 });
+ var st = new ShardingTest({shards: 2, mongos: 2});
-var mongos = st.s0;
-var staleMongos = st.s1;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+ var mongos = st.s0;
+ var staleMongos = st.s1;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
+ assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+ printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+ assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
-// Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first shard
-jsTest.log( "Creating ranges..." );
+ // Create ranges MIN->0,0->10,(hole),20->40,40->50,50->90,(hole),100->110,110->MAX on first
+ // shard
+ jsTest.log("Creating ranges...");
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 10 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 20 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 40 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 50 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 90 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 100 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 110 } }).ok );
+ assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 10}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 20}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 40}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 50}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 90}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 100}}).ok);
+ assert(admin.runCommand({split: coll + "", middle: {_id: 110}}).ok);
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 10 }, to : shards[1]._id }).ok );
-assert( admin.runCommand({ moveChunk : coll + "", find : { _id : 90 }, to : shards[1]._id }).ok );
+ assert(admin.runCommand({moveChunk: coll + "", find: {_id: 10}, to: shards[1]._id}).ok);
+ assert(admin.runCommand({moveChunk: coll + "", find: {_id: 90}, to: shards[1]._id}).ok);
-st.printShardingStatus();
+ st.printShardingStatus();
-// Insert some data into each of the consolidated ranges
-assert.writeOK(coll.insert({ _id : 0 }));
-assert.writeOK(coll.insert({ _id : 40 }));
-assert.writeOK(coll.insert({ _id : 110 }));
+ // Insert some data into each of the consolidated ranges
+ assert.writeOK(coll.insert({_id: 0}));
+ assert.writeOK(coll.insert({_id: 40}));
+ assert.writeOK(coll.insert({_id: 110}));
-var staleCollection = staleMongos.getCollection( coll + "" );
+ var staleCollection = staleMongos.getCollection(coll + "");
-jsTest.log( "Trying merges that should fail..." );
+ jsTest.log("Trying merges that should fail...");
-// S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
-// S1: 10->20, 90->100
+ // S0: min->0, 0->10, 20->40, 40->50, 50->90, 100->110, 110->max
+ // S1: 10->20, 90->100
-// Make sure merging non-exact chunks is invalid
+ // Make sure merging non-exact chunks is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : MinKey }, { _id : 5 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 5 }, { _id : 10 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 15 }, { _id : 50 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 20 }, { _id : 55 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 105 }, { _id : MaxKey }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 5}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 5}, {_id: 10}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 15}, {_id: 50}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 55}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 105}, {_id: MaxKey}]}).ok);
-// Make sure merging single chunks is invalid
+ // Make sure merging single chunks is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : MinKey }, { _id : 0 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 20 }, { _id : 40 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 110 }, { _id : MaxKey }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 0}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 40}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 110}, {_id: MaxKey}]}).ok);
-// Make sure merging over holes is invalid
+ // Make sure merging over holes is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 0 }, { _id : 40 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 40 }, { _id : 110 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 50 }, { _id : 110 }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 40}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 110}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 50}, {_id: 110}]}).ok);
-// Make sure merging between shards is invalid
+ // Make sure merging between shards is invalid
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 0 }, { _id : 20 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 10 }, { _id : 40 }] }).ok );
-assert( !admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 40 }, { _id : 100 }] }).ok );
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 0}, {_id: 20}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 10}, {_id: 40}]}).ok);
+ assert(!admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 40}, {_id: 100}]}).ok);
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-jsTest.log( "Trying merges that should succeed..." );
+ jsTest.log("Trying merges that should succeed...");
-assert( admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : MinKey }, { _id : 10 }] }).ok );
+ assert(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: 10}]}).ok);
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-// S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
-// S1: 10->20, 90->100
+ // S0: min->10, 20->40, 40->50, 50->90, 100->110, 110->max
+ // S1: 10->20, 90->100
-// Make sure merging three chunks is valid.
+ // Make sure merging three chunks is valid.
-jsTest.log(tojson( admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 20 }, { _id : 90 }] }) ));
+ jsTest.log(tojson(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 20}, {_id: 90}]})));
-// S0: min->10, 20->90, 100->110, 110->max
-// S1: 10->20, 90->100
+ // S0: min->10, 20->90, 100->110, 110->max
+ // S1: 10->20, 90->100
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-assert( admin.runCommand({ mergeChunks : coll + "",
- bounds : [{ _id : 100 }, { _id : MaxKey }] }).ok );
+ assert(admin.runCommand({mergeChunks: coll + "", bounds: [{_id: 100}, {_id: MaxKey}]}).ok);
-assert.eq( 3, staleCollection.find().itcount() );
+ assert.eq(3, staleCollection.find().itcount());
-// S0: min->10, 20->90, 100->max
-// S1: 10->20, 90->100
+ // S0: min->10, 20->90, 100->max
+ // S1: 10->20, 90->100
-st.printShardingStatus();
+ st.printShardingStatus();
-st.stop();
+ st.stop();
})();
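
When a merge in this test does not behave as expected, the resulting layout can be read straight out of config.chunks; a small sketch using the same mongos and coll handles:

    // Sketch: print each chunk range for the collection as recorded on the config servers.
    mongos.getDB("config").chunks.find({ns: coll + ""}).sort({min: 1}).forEach(function(chunk) {
        print(tojson(chunk.min) + " -> " + tojson(chunk.max) + " on " + chunk.shard);
    });
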
diff --git a/jstests/sharding/merge_chunks_test_with_md_ops.js b/jstests/sharding/merge_chunks_test_with_md_ops.js
index ddcdfbaf0c2..591413a109c 100644
--- a/jstests/sharding/merge_chunks_test_with_md_ops.js
+++ b/jstests/sharding/merge_chunks_test_with_md_ops.js
@@ -1,57 +1,55 @@
// Tests that merging chunks does not prevent cluster from doing other metadata ops
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { _id: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}));
-st.printShardingStatus();
+ st.printShardingStatus();
-// Split and merge the first chunk repeatedly
-jsTest.log("Splitting and merging repeatedly...");
+ // Split and merge the first chunk repeatedly
+ jsTest.log("Splitting and merging repeatedly...");
-for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({ split: coll + "", middle: { _id: i } }));
- assert.commandWorked(admin.runCommand({ mergeChunks: coll + "",
- bounds: [ { _id: MinKey }, { _id: MaxKey } ] }));
- printjson(mongos.getDB("config").chunks.find().toArray());
-}
+ for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+ }
-// Move the first chunk to the other shard
-jsTest.log("Moving to another shard...");
+ // Move the first chunk to the other shard
+ jsTest.log("Moving to another shard...");
-assert.commandWorked(admin.runCommand({ moveChunk: coll + "",
- find: { _id: 0 },
- to: shards[1]._id }));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-// Split and merge the chunk repeatedly
-jsTest.log("Splitting and merging repeatedly (again)...");
+ // Split and merge the chunk repeatedly
+ jsTest.log("Splitting and merging repeatedly (again)...");
-for (var i = 0; i < 5; i++) {
- assert.commandWorked(admin.runCommand({ split: coll + "", middle: { _id: i } }));
- assert.commandWorked(admin.runCommand({ mergeChunks: coll + "",
- bounds: [{ _id: MinKey }, { _id: MaxKey }] }));
- printjson(mongos.getDB("config").chunks.find().toArray());
-}
+ for (var i = 0; i < 5; i++) {
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {_id: i}}));
+ assert.commandWorked(
+ admin.runCommand({mergeChunks: coll + "", bounds: [{_id: MinKey}, {_id: MaxKey}]}));
+ printjson(mongos.getDB("config").chunks.find().toArray());
+ }
-// Move the chunk back to the original shard
-jsTest.log("Moving to original shard...");
+ // Move the chunk back to the original shard
+ jsTest.log("Moving to original shard...");
-assert.commandWorked(admin.runCommand({ moveChunk: coll + "",
- find: { _id: 0 },
- to: shards[0]._id }));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[0]._id}));
-st.printShardingStatus();
+ st.printShardingStatus();
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js
index 73a292033bc..8895d14c0d6 100644
--- a/jstests/sharding/migrateBig.js
+++ b/jstests/sharding/migrateBig.js
@@ -1,80 +1,80 @@
(function() {
-var s = new ShardingTest({ name: "migrateBig",
- shards: 2,
- other: { chunkSize: 1 } });
+ var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}});
-s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true);
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { x : 1 } } );
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {x: 1}});
-db = s.getDB( "test" );
-coll = db.foo;
+ db = s.getDB("test");
+ coll = db.foo;
-big = "";
-while ( big.length < 10000 )
- big += "eliot";
+ big = "";
+ while (big.length < 10000)
+ big += "eliot";
-var bulk = coll.initializeUnorderedBulkOp();
-for ( x=0; x<100; x++ ) {
- bulk.insert( { x : x , big : big } );
-}
-assert.writeOK(bulk.execute());
-
-s.printShardingStatus();
-
-s.adminCommand( { split : "test.foo" , middle : { x : 30 } } );
-s.adminCommand( { split : "test.foo" , middle : { x : 66 } } );
-s.adminCommand( { movechunk : "test.foo" ,
- find : { x : 90 } ,
- to : s.getOther( s.getPrimaryShard( "test" ) ).name } );
-
-s.printShardingStatus();
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (x = 0; x < 100; x++) {
+ bulk.insert({x: x, big: big});
+ }
+ assert.writeOK(bulk.execute());
-print( "YO : " + s.getPrimaryShard( "test" ).host );
-direct = new Mongo( s.getPrimaryShard( "test" ).host );
-print( "direct : " + direct );
+ s.printShardingStatus();
-directDB = direct.getDB( "test" );
+ s.adminCommand({split: "test.foo", middle: {x: 30}});
+ s.adminCommand({split: "test.foo", middle: {x: 66}});
+ s.adminCommand(
+ {movechunk: "test.foo", find: {x: 90}, to: s.getOther(s.getPrimaryShard("test")).name});
-for ( done=0; done<2*1024*1024; done+=big.length ){
- assert.writeOK(directDB.foo.insert( { x : 50 + Math.random() , big : big } ));
-}
+ s.printShardingStatus();
-s.printShardingStatus();
+ print("YO : " + s.getPrimaryShard("test").host);
+ direct = new Mongo(s.getPrimaryShard("test").host);
+ print("direct : " + direct);
-assert.throws( function(){
- s.adminCommand({ movechunk : "test.foo" ,
- find : { x : 50 } ,
- to : s.getOther( s.getPrimaryShard( "test" ) ).name });
-}, [], "move should fail" );
+ directDB = direct.getDB("test");
-for ( i=0; i<20; i+= 2 ) {
- try {
- s.adminCommand( { split : "test.foo" , middle : { x : i } } );
+ for (done = 0; done < 2 * 1024 * 1024; done += big.length) {
+ assert.writeOK(directDB.foo.insert({x: 50 + Math.random(), big: big}));
}
- catch ( e ) {
- // we may have auto split on some of these
- // which is ok
- print(e);
+
+ s.printShardingStatus();
+
+ assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {x: 50},
+ to: s.getOther(s.getPrimaryShard("test")).name
+ });
+ }, [], "move should fail");
+
+ for (i = 0; i < 20; i += 2) {
+ try {
+ s.adminCommand({split: "test.foo", middle: {x: i}});
+ } catch (e) {
+ // we may have auto split on some of these
+ // which is ok
+ print(e);
+ }
}
-}
-s.printShardingStatus();
+ s.printShardingStatus();
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: false } } , true );
+ s.config.settings.update({_id: "balancer"}, {$set: {stopped: false}}, true);
-assert.soon( function(){
- var x = s.chunkDiff( "foo" , "test" );
- print( "chunk diff: " + x );
- return x < 2;
-}, "no balance happened" , 8 * 60 * 1000 , 2000 );
+ assert.soon(function() {
+ var x = s.chunkDiff("foo", "test");
+ print("chunk diff: " + x);
+ return x < 2;
+ }, "no balance happened", 8 * 60 * 1000, 2000);
-assert.soon( function(){ return !s.isAnyBalanceInFlight(); } );
+ assert.soon(function() {
+ return !s.isAnyBalanceInFlight();
+ });
-assert.eq( coll.count() , coll.find().itcount() );
+ assert.eq(coll.count(), coll.find().itcount());
-s.stop();
+ s.stop();
})();
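
The move at {x: 50} is expected to fail because the direct inserts pad that chunk far beyond the 1 MB chunk size this test configures; the dataSize command gives a quick view of how much data sits inside a chunk range (a sketch reusing the direct connection from the test, with bounds matching the splits at x: 30 and x: 66):

    // Sketch: measure the data inside the [30, 66) chunk that refuses to move.
    var sizeRes = direct.getDB("test").runCommand(
        {dataSize: "test.foo", keyPattern: {x: 1}, min: {x: 30}, max: {x: 66}});
    assert.commandWorked(sizeRes);
    print("bytes in [30, 66): " + sizeRes.size + " across " + sizeRes.numObjects + " documents");
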
diff --git a/jstests/sharding/migrateBig_balancer.js b/jstests/sharding/migrateBig_balancer.js
index 5512eb883db..cd44a225a62 100644
--- a/jstests/sharding/migrateBig_balancer.js
+++ b/jstests/sharding/migrateBig_balancer.js
@@ -1,68 +1,72 @@
(function() {
-var st = new ShardingTest({ name: 'migrateBig_balancer',
- shards: 2,
- other: { enableBalancer: true } });
-var mongos = st.s;
+ var st =
+ new ShardingTest({name: 'migrateBig_balancer', shards: 2, other: {enableBalancer: true}});
+ var mongos = st.s;
-var admin = mongos.getDB("admin");
-db = mongos.getDB("test");
-var coll = db.getCollection("stuff");
+ var admin = mongos.getDB("admin");
+ db = mongos.getDB("test");
+ var coll = db.getCollection("stuff");
-assert.commandWorked(admin.runCommand({ enablesharding : coll.getDB().getName() }));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+ assert.commandWorked(admin.runCommand({enablesharding: coll.getDB().getName()}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-var data = "x";
-var nsq = 16;
-var n = 255;
+ var data = "x";
+ var nsq = 16;
+ var n = 255;
-for( var i = 0; i < nsq; i++ ) data += data;
+ for (var i = 0; i < nsq; i++)
+ data += data;
-dataObj = {};
-for( var i = 0; i < n; i++ ) dataObj["data-" + i] = data;
+ dataObj = {};
+ for (var i = 0; i < n; i++)
+ dataObj["data-" + i] = data;
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 40; i++ ) {
- bulk.insert({ data: dataObj });
-}
-assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 40; i++) {
+ bulk.insert({data: dataObj});
+ }
+ assert.writeOK(bulk.execute());
-assert.eq( 40 , coll.count() , "prep1" );
+ assert.eq(40, coll.count(), "prep1");
-printjson( coll.stats() );
+ printjson(coll.stats());
-admin.printShardingStatus();
+ admin.printShardingStatus();
-admin.runCommand({ shardcollection : "" + coll, key : { _id : 1 } });
+ admin.runCommand({shardcollection: "" + coll, key: {_id: 1}});
-assert.lt( 5 , mongos.getDB( "config" ).chunks.find( { ns : "test.stuff" } ).count() , "not enough chunks" );
+ assert.lt(
+ 5, mongos.getDB("config").chunks.find({ns: "test.stuff"}).count(), "not enough chunks");
-assert.soon(
- function() {
- // On *extremely* slow or variable systems, we've seen migrations fail in the critical section and
+ assert.soon(function() {
+ // On *extremely* slow or variable systems, we've seen migrations fail in the critical
+ // section and
// kill the server. Do an explicit check for this. SERVER-8781
// TODO: Remove once we can better specify what systems to run what tests on.
try {
- assert.commandWorked(st.shard0.getDB("admin").runCommand({ ping: 1 }));
- assert.commandWorked(st.shard1.getDB("admin").runCommand({ ping: 1 }));
- }
- catch(e) {
+ assert.commandWorked(st.shard0.getDB("admin").runCommand({ping: 1}));
+ assert.commandWorked(st.shard1.getDB("admin").runCommand({ping: 1}));
+ } catch (e) {
print("An error occurred contacting a shard during balancing," +
" this may be due to slow disk I/O, aborting test.");
throw e;
}
-
- res = mongos.getDB( "config" ).chunks.group( { cond : { ns : "test.stuff" } ,
- key : { shard : 1 } ,
- reduce : function( doc , out ){ out.nChunks++; } ,
- initial : { nChunks : 0 } } );
-
- printjson( res );
- return res.length > 1 && Math.abs( res[0].nChunks - res[1].nChunks ) <= 3;
-
- } ,
- "never migrated" , 10 * 60 * 1000 , 1000 );
-
-st.stop();
+
+ res = mongos.getDB("config").chunks.group({
+ cond: {ns: "test.stuff"},
+ key: {shard: 1},
+ reduce: function(doc, out) {
+ out.nChunks++;
+ },
+ initial: {nChunks: 0}
+ });
+
+ printjson(res);
+ return res.length > 1 && Math.abs(res[0].nChunks - res[1].nChunks) <= 3;
+
+ }, "never migrated", 10 * 60 * 1000, 1000);
+
+ st.stop();
})();
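
The balance condition above is computed with the group command over config.chunks; the same per-shard chunk tally can be taken with a plain cursor, which is sometimes easier to read when debugging a stalled balancer (a sketch assuming the same mongos and the test.stuff namespace):

    // Sketch: count chunks per shard for test.stuff by iterating config.chunks directly.
    var chunksPerShard = {};
    mongos.getDB("config").chunks.find({ns: "test.stuff"}).forEach(function(chunk) {
        chunksPerShard[chunk.shard] = (chunksPerShard[chunk.shard] || 0) + 1;
    });
    printjson(chunksPerShard);
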
diff --git a/jstests/sharding/migrate_overwrite_id.js b/jstests/sharding/migrate_overwrite_id.js
index 26afd8258ac..1d5bc2f3236 100644
--- a/jstests/sharding/migrate_overwrite_id.js
+++ b/jstests/sharding/migrate_overwrite_id.js
@@ -2,40 +2,40 @@
// Tests that a migration does not overwrite duplicate _ids on data transfer
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
st.stopBalancer();
var mongos = st.s0;
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+var shards = mongos.getDB("config").shards.find().toArray();
shards[0].conn = st.shard0;
shards[1].conn = st.shard1;
-var admin = mongos.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { skey : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { skey : 0 } }).ok );
-assert( admin.runCommand({ moveChunk : coll + "", find : { skey : 0 }, to : shards[1]._id }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {skey: 0}}).ok);
+assert(admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: shards[1]._id}).ok);
var id = 12345;
-jsTest.log( "Inserting a document with id : 12345 into both shards with diff shard key..." );
+jsTest.log("Inserting a document with id : 12345 into both shards with diff shard key...");
-assert.writeOK(coll.insert({ _id : id, skey : -1 }));
-assert.writeOK(coll.insert({ _id : id, skey : 1 }));
+assert.writeOK(coll.insert({_id: id, skey: -1}));
+assert.writeOK(coll.insert({_id: id, skey: 1}));
-printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-assert.eq( 2, coll.find({ _id : id }).itcount() );
+printjson(shards[0].conn.getCollection(coll + "").find({_id: id}).toArray());
+printjson(shards[1].conn.getCollection(coll + "").find({_id: id}).toArray());
+assert.eq(2, coll.find({_id: id}).itcount());
-jsTest.log( "Moving both chunks to same shard..." );
+jsTest.log("Moving both chunks to same shard...");
-var result = admin.runCommand({ moveChunk : coll + "", find : { skey : -1 }, to : shards[1]._id });
-printjson( result );
+var result = admin.runCommand({moveChunk: coll + "", find: {skey: -1}, to: shards[1]._id});
+printjson(result);
-printjson( shards[0].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-printjson( shards[1].conn.getCollection( coll + "" ).find({ _id : id }).toArray() );
-assert.eq( 2, coll.find({ _id : id }).itcount() );
+printjson(shards[0].conn.getCollection(coll + "").find({_id: id}).toArray());
+printjson(shards[1].conn.getCollection(coll + "").find({_id: id}).toArray());
+assert.eq(2, coll.find({_id: id}).itcount());
st.stop();
diff --git a/jstests/sharding/migration_failure.js b/jstests/sharding/migration_failure.js
index 0b9c950908b..97ab7ddf967 100644
--- a/jstests/sharding/migration_failure.js
+++ b/jstests/sharding/migration_failure.js
@@ -1,20 +1,20 @@
//
-// Tests that migration failures before and after commit correctly roll back
+// Tests that migration failures before and after commit correctly roll back
// when possible
//
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+var st = new ShardingTest({shards: 2, mongos: 1});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var shards = mongos.getCollection("config.shards").find().toArray();
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : shards[0]._id }) );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: shards[0]._id}));
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
st.printShardingStatus();
@@ -23,58 +23,46 @@ jsTest.log("Testing failed migrations...");
var version = null;
var failVersion = null;
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationCommit', mode : 'alwaysOn' }));
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationCommit', mode: 'alwaysOn'}));
-version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+version = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandFailed( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandFailed(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+failVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationCommit', mode : 'off' }));
+assert.commandWorked(st.shard0.getDB("admin")
+ .runCommand({configureFailPoint: 'failMigrationCommit', mode: 'off'}));
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'alwaysOn' }));
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationConfigWritePrepare', mode: 'alwaysOn'}));
-version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+version = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandFailed( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandFailed(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+failVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
assert.eq(version.global, failVersion.global);
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failMigrationConfigWritePrepare', mode : 'off' }));
+assert.commandWorked(st.shard0.getDB("admin").runCommand(
+ {configureFailPoint: 'failMigrationConfigWritePrepare', mode: 'off'}));
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failApplyChunkOps', mode : 'alwaysOn' }));
+assert.commandWorked(st.shard0.getDB("admin")
+ .runCommand({configureFailPoint: 'failApplyChunkOps', mode: 'alwaysOn'}));
-version = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+version = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "",
- find : { _id : 0 },
- to : shards[1]._id }) );
+assert.commandWorked(admin.runCommand({moveChunk: coll + "", find: {_id: 0}, to: shards[1]._id}));
-failVersion = st.shard0.getDB("admin").runCommand({ getShardVersion : coll.toString() });
+failVersion = st.shard0.getDB("admin").runCommand({getShardVersion: coll.toString()});
assert.neq(version.global, failVersion.global);
-assert.commandWorked(
- st.shard0.getDB("admin").runCommand({
- configureFailPoint : 'failApplyChunkOps', mode : 'off' }));
+assert.commandWorked(st.shard0.getDB("admin")
+ .runCommand({configureFailPoint: 'failApplyChunkOps', mode: 'off'}));
-jsTest.log( "DONE!" );
+jsTest.log("DONE!");
st.stop();
diff --git a/jstests/sharding/migration_ignore_interrupts.js b/jstests/sharding/migration_ignore_interrupts.js
index 80724895810..04b92088b3b 100644
--- a/jstests/sharding/migration_ignore_interrupts.js
+++ b/jstests/sharding/migration_ignore_interrupts.js
@@ -7,331 +7,303 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
-"use strict";
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// Starting setup
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-var staticMongod1 = MongoRunner.runMongod({}); // For startParallelOps.
-var staticMongod2 = MongoRunner.runMongod({}); // For startParallelOps.
-
-var st = new ShardingTest({ shards : 4, mongos : 1 });
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = "testDB",
- ns1 = dbName + ".foo",
- coll1 = mongos.getCollection(ns1),
- ns2 = dbName + ".baz",
- coll2 = mongos.getCollection(ns2),
- shard0 = st.shard0,
- shard1 = st.shard1,
- shard2 = st.shard2,
- shard0Coll1 = shard0.getCollection(ns1),
- shard0Coll2 = shard0.getCollection(ns2),
- shard1Coll1 = shard1.getCollection(ns1),
- shard1Coll2 = shard1.getCollection(ns2),
- shard2Coll1 = shard2.getCollection(ns1),
- shard2Coll2 = shard2.getCollection(ns2);
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
-
-assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
-assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 20}}));
-assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: ns2, middle: {a: 10}}));
-
-assert.writeOK(coll1.insert({a: 0}));
-assert.writeOK(coll1.insert({a: 10}));
-assert.writeOK(coll1.insert({a: 20}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(3, coll1.count());
-assert.writeOK(coll2.insert({a: 0}));
-assert.writeOK(coll2.insert({a: 10}));
-assert.eq(2, shard0Coll2.count());
-assert.eq(2, coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 1. When a migration is in process from shard0 to shard1 on coll1, shard2 is unable to
-// start a migration with either shard in the following cases:
-// 1. coll1 shard2 to shard0 -- coll1 is already locked.
-// 2. coll1 shard2 to shard1 -- coll1 is already locked.
-// 3. coll1 shard1 to shard2 -- coll1 is already locked.
-// 4. coll2 shard2 to shard1 -- shard1 can't receive two chunks simultaneously.
-// 5. coll2 shard0 to shard2 -- shard0 can't send two chunks simultaneously.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10)
-// coll2: [0, 10)
-// Shard1:
-// coll1: [20, 30)
-// Shard2:
-// coll1: [10, 20)
-// coll2: [10, 20)
-
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[2]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[2]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[1]._id}));
-assert.eq(1, shard0Coll1.count());
-assert.eq(1, shard0Coll2.count());
-assert.eq(1, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(1, shard2Coll1.count());
-assert.eq(1, shard2Coll2.count());
-
-// Start a migration between shard0 and shard1 on coll1 and then pause it
-pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-var joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
-jsTest.log('Attempting to interrupt migration....');
-// Test 1.1
-assert.commandFailed(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}),
- "(1.1) coll1 lock should have prevented simultaneous migrations in the collection.");
-// Test 1.2
-assert.commandFailed(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[1]._id}),
- "(1.2) coll1 lock should have prevented simultaneous migrations in the collection.");
-// Test 1.3
-assert.commandFailed(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[2]._id}),
- "(1.3) coll1 lock should have prevented simultaneous migrations in the collection.");
-// Test 1.4
-assert.commandFailed(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[1]._id}),
- "(1.4) A shard should not be able to be the recipient of two ongoing migrations");
-// Test 1.5
-assert.commandFailed(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[2]._id}),
- "(1.5) A shard should not be able to be the donor for two ongoing migrations.");
-
-// Finish migration
-unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-assert.doesNotThrow(function() {
- joinMoveChunk1();
-});
-assert.eq(0, shard0Coll1.count());
-assert.eq(2, shard1Coll1.count());
-
-// Reset setup
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[0]._id}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(2, shard0Coll2.count());
-assert.eq(0, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(0, shard2Coll1.count());
-assert.eq(0, shard2Coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 2. When a migration between shard0 and shard1 is about to enter the commit phase, a
-// commit command from shard2 (different migration session ID) is rejected.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-// Start a migration between shard0 and shard1 on coll1, pause in steady state before commit
-pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
-joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
-
-jsTest.log('Sending false commit command....');
-assert.commandFailed(shard2.adminCommand(
- {'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
-
-jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
-var res = shard1.adminCommand('_recvChunkStatus');
-assert.commandWorked(res);
-assert.eq(true, res.state === "steady", "False commit command succeeded");
-
-// Finish migration
-unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
-assert.doesNotThrow(function() {
- joinMoveChunk1();
-});
-assert.eq(2, shard0Coll1.count());
-assert.eq(1, shard1Coll1.count());
-
-// Reset setup
-assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(2, shard0Coll2.count());
-assert.eq(0, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(0, shard2Coll1.count());
-assert.eq(0, shard2Coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 3. If a donor aborts a migration to a recipient, the recipient does not realize the
-// migration has been aborted, and the donor moves on to a new migration, the original
-// recipient will then fail to clone documents from the donor.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-// Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt check
-pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
-
-// Abort migration on donor side, recipient is unaware
-var inProgressOps = admin.currentOp().inprog;
-var abortedMigration = false;
-for (var op in inProgressOps) {
- if (inProgressOps[op].query.moveChunk) {
- admin.killOp(inProgressOps[op].opid);
- abortedMigration = true;
+ "use strict";
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // Starting setup
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ var staticMongod1 = MongoRunner.runMongod({}); // For startParallelOps.
+ var staticMongod2 = MongoRunner.runMongod({}); // For startParallelOps.
+
+ var st = new ShardingTest({shards: 4, mongos: 1});
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
+ ns1 = dbName + ".foo", coll1 = mongos.getCollection(ns1), ns2 = dbName + ".baz",
+ coll2 = mongos.getCollection(ns2), shard0 = st.shard0, shard1 = st.shard1,
+ shard2 = st.shard2, shard0Coll1 = shard0.getCollection(ns1),
+ shard0Coll2 = shard0.getCollection(ns2), shard1Coll1 = shard1.getCollection(ns1),
+ shard1Coll2 = shard1.getCollection(ns2), shard2Coll1 = shard2.getCollection(ns1),
+ shard2Coll2 = shard2.getCollection(ns2);
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shards[0]._id);
+
+ assert.commandWorked(admin.runCommand({shardCollection: ns1, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 10}}));
+ assert.commandWorked(admin.runCommand({split: ns1, middle: {a: 20}}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns2, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns2, middle: {a: 10}}));
+
+ assert.writeOK(coll1.insert({a: 0}));
+ assert.writeOK(coll1.insert({a: 10}));
+ assert.writeOK(coll1.insert({a: 20}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(3, coll1.count());
+ assert.writeOK(coll2.insert({a: 0}));
+ assert.writeOK(coll2.insert({a: 10}));
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(2, coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 1. When a migration is in process from shard0 to shard1 on coll1, shard2 is unable to
+ // start a migration with either shard in the following cases:
+ // 1. coll1 shard2 to shard0 -- coll1 is already locked.
+ // 2. coll1 shard2 to shard1 -- coll1 is already locked.
+ // 3. coll1 shard1 to shard2 -- coll1 is already locked.
+ // 4. coll2 shard2 to shard1 -- shard1 can't receive two chunks simultaneously.
+ // 5. coll2 shard0 to shard2 -- shard0 can't send two chunks simultaneously.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10)
+ // coll2: [0, 10)
+ // Shard1:
+ // coll1: [20, 30)
+ // Shard2:
+ // coll1: [10, 20)
+ // coll2: [10, 20)
+
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[2]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[2]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[1]._id}));
+ assert.eq(1, shard0Coll1.count());
+ assert.eq(1, shard0Coll2.count());
+ assert.eq(1, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(1, shard2Coll1.count());
+ assert.eq(1, shard2Coll2.count());
+
+ // Start a migration between shard0 and shard1 on coll1 and then pause it
+ pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ var joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+ jsTest.log('Attempting to interrupt migration....');
+ // Test 1.1
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}),
+ "(1.1) coll1 lock should have prevented simultaneous migrations in the collection.");
+ // Test 1.2
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[1]._id}),
+ "(1.2) coll1 lock should have prevented simultaneous migrations in the collection.");
+ // Test 1.3
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[2]._id}),
+ "(1.3) coll1 lock should have prevented simultaneous migrations in the collection.");
+ // Test 1.4
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[1]._id}),
+ "(1.4) A shard should not be able to be the recipient of two ongoing migrations");
+ // Test 1.5
+ assert.commandFailed(
+ admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[2]._id}),
+ "(1.5) A shard should not be able to be the donor for two ongoing migrations.");
+
+ // Finish migration
+ unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ assert.doesNotThrow(function() {
+ joinMoveChunk1();
+ });
+ assert.eq(0, shard0Coll1.count());
+ assert.eq(2, shard1Coll1.count());
+
+ // Reset setup
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 20}, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 10}, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 10}, to: shards[0]._id}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(0, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(0, shard2Coll1.count());
+ assert.eq(0, shard2Coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 2. When a migration between shard0 and shard1 is about to enter the commit phase, a
+ // commit command from shard2 (different migration session ID) is rejected.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ // Start a migration between shard0 and shard1 on coll1, pause in steady state before commit
+ pauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+ joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMoveChunkStep(shard0, moveChunkStepNames.reachedSteadyState);
+
+ jsTest.log('Sending false commit command....');
+ assert.commandFailed(
+ shard2.adminCommand({'_recvChunkCommit': 1, 'sessionId': "fake-migration-session-id"}));
+
+ jsTest.log("Checking migration recipient is still in steady state, waiting for commit....");
+ var res = shard1.adminCommand('_recvChunkStatus');
+ assert.commandWorked(res);
+ assert.eq(true, res.state === "steady", "False commit command succeeded");
+
+ // Finish migration
+ unpauseMoveChunkAtStep(shard0, moveChunkStepNames.reachedSteadyState);
+ assert.doesNotThrow(function() {
+ joinMoveChunk1();
+ });
+ assert.eq(2, shard0Coll1.count());
+ assert.eq(1, shard1Coll1.count());
+
+ // Reset setup
+ assert.commandWorked(admin.runCommand({moveChunk: ns1, find: {a: 0}, to: shards[0]._id}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(0, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(0, shard2Coll1.count());
+ assert.eq(0, shard2Coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 3. If a donor aborts a migration to a recipient, the recipient does not realize the
+ // migration has been aborted, and the donor moves on to a new migration, the original
+ // recipient will then fail to clone documents from the donor.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ // Start coll1 migration to shard1: pause recipient after delete step, donor before interrupt
+ // check
+ pauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMigrateStep(shard1, migrateStepNames.deletedPriorDataInRange);
+
+ // Abort migration on donor side, recipient is unaware
+ var inProgressOps = admin.currentOp().inprog;
+ var abortedMigration = false;
+ for (var op in inProgressOps) {
+ if (inProgressOps[op].query.moveChunk) {
+ admin.killOp(inProgressOps[op].opid);
+ abortedMigration = true;
+ }
}
-}
-assert.eq(true, abortedMigration, "Failed to abort migration, current running ops: " +
- tojson(inProgressOps));
-unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-assert.throws(function() {
- joinMoveChunk1();
-});
-
-// Start coll2 migration to shard2, pause recipient after delete step
-pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
-var joinMoveChunk2 = moveChunkParallel(
- staticMongod2,
- st.s0.host,
- {a: 0},
- null,
- coll2.getFullName(),
- shards[2]._id);
-waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
-
-jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
-unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
-assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
-assert.eq(0, shard1Coll1.count(), "shard1 cloned documents despite donor migration abortion");
-
-jsTest.log('Finishing coll2 migration, which should succeed....');
-unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
-assert.doesNotThrow(function() {
- joinMoveChunk2();
-});
-assert.eq(1, shard0Coll2.count(), "donor shard0 failed to complete a migration " +
- "after aborting a prior migration");
-assert.eq(1, shard2Coll2.count(), "shard2 failed to complete migration");
-
-// Reset setup
-assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[0]._id}));
-assert.eq(3, shard0Coll1.count());
-assert.eq(2, shard0Coll2.count());
-assert.eq(0, shard1Coll1.count());
-assert.eq(0, shard1Coll2.count());
-assert.eq(0, shard2Coll1.count());
-assert.eq(0, shard2Coll2.count());
-
-///////////////////////////////////////////////////////////////////////////////////////////////////
-// 4. If a donor aborts a migration to a recipient, the recipient does not realize the
-// migration has been aborted, and the donor moves on to a new migration, the original
-// recipient will then fail to retrieve transferMods from the donor's xfermods log.
-///////////////////////////////////////////////////////////////////////////////////////////////////
-
-// Shard0:
-// coll1: [0, 10) [10, 20) [20, 30)
-// coll2: [0, 10) [10, 20)
-// Shard1:
-// Shard2:
-
-// Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
-pauseMigrateAtStep(shard1, migrateStepNames.cloned);
-pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-joinMoveChunk1 = moveChunkParallel(
- staticMongod1,
- st.s0.host,
- {a: 0},
- null,
- coll1.getFullName(),
- shards[1]._id);
-waitForMigrateStep(shard1, migrateStepNames.cloned);
-
-// Abort migration on donor side, recipient is unaware
-inProgressOps = admin.currentOp().inprog;
-abortedMigration = false;
-for (var op in inProgressOps) {
- if (inProgressOps[op].query.moveChunk) {
- admin.killOp(inProgressOps[op].opid);
- abortedMigration = true;
+ assert.eq(true,
+ abortedMigration,
+ "Failed to abort migration, current running ops: " + tojson(inProgressOps));
+ unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ assert.throws(function() {
+ joinMoveChunk1();
+ });
+
+ // Start coll2 migration to shard2, pause recipient after delete step
+ pauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+ var joinMoveChunk2 = moveChunkParallel(
+ staticMongod2, st.s0.host, {a: 0}, null, coll2.getFullName(), shards[2]._id);
+ waitForMigrateStep(shard2, migrateStepNames.deletedPriorDataInRange);
+
+ jsTest.log('Releasing coll1 migration recipient, whose clone command should fail....');
+ unpauseMigrateAtStep(shard1, migrateStepNames.deletedPriorDataInRange);
+ assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
+ assert.eq(0, shard1Coll1.count(), "shard1 cloned documents despite donor migration abortion");
+
+ jsTest.log('Finishing coll2 migration, which should succeed....');
+ unpauseMigrateAtStep(shard2, migrateStepNames.deletedPriorDataInRange);
+ assert.doesNotThrow(function() {
+ joinMoveChunk2();
+ });
+ assert.eq(1,
+ shard0Coll2.count(),
+ "donor shard0 failed to complete a migration " + "after aborting a prior migration");
+ assert.eq(1, shard2Coll2.count(), "shard2 failed to complete migration");
+
+ // Reset setup
+ assert.commandWorked(admin.runCommand({moveChunk: ns2, find: {a: 0}, to: shards[0]._id}));
+ assert.eq(3, shard0Coll1.count());
+ assert.eq(2, shard0Coll2.count());
+ assert.eq(0, shard1Coll1.count());
+ assert.eq(0, shard1Coll2.count());
+ assert.eq(0, shard2Coll1.count());
+ assert.eq(0, shard2Coll2.count());
+
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+ // 4. If a donor aborts a migration to a recipient, the recipient does not realize the
+ // migration has been aborted, and the donor moves on to a new migration, the original
+ // recipient will then fail to retrieve transferMods from the donor's xfermods log.
+ ///////////////////////////////////////////////////////////////////////////////////////////////////
+
+ // Shard0:
+ // coll1: [0, 10) [10, 20) [20, 30)
+ // coll2: [0, 10) [10, 20)
+ // Shard1:
+ // Shard2:
+
+ // Start coll1 migration to shard1: pause recipient after cloning, donor before interrupt check
+ pauseMigrateAtStep(shard1, migrateStepNames.cloned);
+ pauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ joinMoveChunk1 = moveChunkParallel(
+ staticMongod1, st.s0.host, {a: 0}, null, coll1.getFullName(), shards[1]._id);
+ waitForMigrateStep(shard1, migrateStepNames.cloned);
+
+ // Abort migration on donor side, recipient is unaware
+ inProgressOps = admin.currentOp().inprog;
+ abortedMigration = false;
+ for (var op in inProgressOps) {
+ if (inProgressOps[op].query.moveChunk) {
+ admin.killOp(inProgressOps[op].opid);
+ abortedMigration = true;
+ }
}
-}
-assert.eq(true, abortedMigration, "Failed to abort migration, current running ops: " +
- tojson(inProgressOps));
-unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
-assert.throws(function() {
- joinMoveChunk1();
-});
-
-// Start coll2 migration to shard2, pause recipient after cloning step
-pauseMigrateAtStep(shard2, migrateStepNames.cloned);
-var joinMoveChunk2 = moveChunkParallel(
- staticMongod2,
- st.s0.host,
- {a: 0},
- null,
- coll2.getFullName(),
- shards[2]._id);
-waitForMigrateStep(shard2, migrateStepNames.cloned);
-
-// Populate donor (shard0) xfermods log.
-assert.writeOK(coll2.insert({a: 1}));
-assert.writeOK(coll2.insert({a: 2}));
-assert.eq(4, coll2.count(), "Failed to insert documents into coll2");
-assert.eq(4, shard0Coll2.count());
-
-jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
-unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
-assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
-assert.eq(1, shard1Coll1.count(), "shard1 accessed the xfermods log despite " +
- "donor migration abortion");
-
-jsTest.log('Finishing coll2 migration, which should succeed....');
-unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
-assert.doesNotThrow(function() {
- joinMoveChunk2();
-});
-assert.eq(1, shard0Coll2.count(), "donor shard0 failed to complete a migration " +
- "after aborting a prior migration");
-assert.eq(3, shard2Coll2.count(), "shard2 failed to complete migration");
-
-st.stop();
+ assert.eq(true,
+ abortedMigration,
+ "Failed to abort migration, current running ops: " + tojson(inProgressOps));
+ unpauseMoveChunkAtStep(shard0, moveChunkStepNames.startedMoveChunk);
+ assert.throws(function() {
+ joinMoveChunk1();
+ });
+
+ // Start coll2 migration to shard2, pause recipient after cloning step
+ pauseMigrateAtStep(shard2, migrateStepNames.cloned);
+ var joinMoveChunk2 = moveChunkParallel(
+ staticMongod2, st.s0.host, {a: 0}, null, coll2.getFullName(), shards[2]._id);
+ waitForMigrateStep(shard2, migrateStepNames.cloned);
+
+ // Populate donor (shard0) xfermods log.
+ assert.writeOK(coll2.insert({a: 1}));
+ assert.writeOK(coll2.insert({a: 2}));
+ assert.eq(4, coll2.count(), "Failed to insert documents into coll2");
+ assert.eq(4, shard0Coll2.count());
+
+ jsTest.log('Releasing coll1 migration recipient, whose transferMods command should fail....');
+ unpauseMigrateAtStep(shard1, migrateStepNames.cloned);
+ assert.eq(3, shard0Coll1.count(), "donor shard0 completed a migration that it aborted");
+ assert.eq(1,
+ shard1Coll1.count(),
+ "shard1 accessed the xfermods log despite " + "donor migration abortion");
+
+ jsTest.log('Finishing coll2 migration, which should succeed....');
+ unpauseMigrateAtStep(shard2, migrateStepNames.cloned);
+ assert.doesNotThrow(function() {
+ joinMoveChunk2();
+ });
+ assert.eq(1,
+ shard0Coll2.count(),
+ "donor shard0 failed to complete a migration " + "after aborting a prior migration");
+ assert.eq(3, shard2Coll2.count(), "shard2 failed to complete migration");
+
+ st.stop();
})();
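The interrupt tests above reset chunk placement by hand after each scenario and then recount documents per shard. A minimal sketch of an extra sanity check against the config metadata, assuming the st, ns1, and shards variables defined in that test (illustrative only, not part of the patch; config.chunks field names as used by this server line):

    // Editor's sketch: after the reset moveChunk calls, every chunk of ns1 should be on shard0.
    var configDB = st.s0.getDB('config');
    var totalChunks = configDB.chunks.find({ns: ns1}).count();
    var chunksOnShard0 = configDB.chunks.find({ns: ns1, shard: shards[0]._id}).count();
    assert.eq(totalChunks, chunksOnShard0, 'not all chunks of ' + ns1 + ' returned to shard0');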
diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js
index c9143aac67c..55dbca8b5fa 100644
--- a/jstests/sharding/migration_sets_fromMigrate_flag.js
+++ b/jstests/sharding/migration_sets_fromMigrate_flag.js
@@ -17,150 +17,151 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
-/**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
-var st = new ShardingTest({ shards : 2, mongos : 1, rs : { nodes : 3 } });
-st.stopBalancer();
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = "testDB",
- ns = dbName + ".foo",
- coll = mongos.getCollection(ns),
- donor = st.shard0,
- recipient = st.shard1,
- donorColl = donor.getCollection(ns),
- recipientColl = recipient.getCollection(ns),
- donorLocal = donor.getDB('local'),
- recipientLocal = recipient.getDB('local');
-
-// Two chunks
-// Donor: [0, 2) [2, 5)
-// Recipient:
-jsTest.log('Enable sharding of the collection and pre-split into two chunks....');
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
-assert.commandWorked(donorColl.createIndex({_id: 1}));
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
-
-// 6 documents,
-// donor: 2 in the first chunk, 3 in the second.
-// recipient: 1 document (shardkey overlaps with a doc in second chunk of donor shard)
-jsTest.log('Inserting 5 docs into donor shard, 1 doc into the recipient shard....');
-
-for (var i = 0; i < 5; ++i)
- assert.writeOK(coll.insert({_id: i}));
-assert.eq(5, donorColl.count());
-
-for (var i = 2; i < 3; ++i)
- assert.writeOK(recipientColl.insert({_id: i}));
-assert.eq(1, recipientColl.count());
-
-/**
- * Set failpoint: recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
-jsTest.log('setting recipient failpoint cloned');
-pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
-/**
- * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
- */
-
-// Donor: [0, 2)
-// Recipient: [2, 5)
-jsTest.log('Starting chunk migration, pause after cloning...');
-
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- {_id: 2},
- null,
- coll.getFullName(),
- shards[1]._id);
-
-/**
- * Wait for recipient to finish cloning.
- * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
- * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
- */
-
-waitForMigrateStep(recipient, migrateStepNames.cloned);
-
-jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
-
-assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
-assert.writeOK(coll.remove({_id: 4}));
-
-/**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the transfer mods log from donor and finish migration.
- */
-
-jsTest.log('Continuing and finishing migration...');
-unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
-joinMoveChunk();
-
-/**
- * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
- * (because third doc in recipient shard's chunk got deleted on the donor shard during migration).
- */
-
-jsTest.log('Checking that documents are on the shards they should be...');
-
-assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
-assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
-assert.eq(4, coll.count(), "Collection total is not 4!");
-
-/**
- * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs,
- */
-
-jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
-
-var donorOplogRes = donorLocal.oplog.rs.find(
- {op: 'd', fromMigrate: true, 'o._id': 2}).count();
-assert.eq(1, donorOplogRes, "fromMigrate flag wasn't set on the donor shard's oplog for " +
- "migrating delete op on {_id: 2}! Test #2 failed.");
-
-donorOplogRes = donorLocal.oplog.rs.find(
- {op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
-assert.eq(1, donorOplogRes, "Real delete of {_id: 4} on donor shard incorrectly set the " +
- "fromMigrate flag in the oplog! Test #5 failed.");
-
-var recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'i', fromMigrate: true, 'o._id': 2}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
-
-recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'd', fromMigrate: true, 'o._id': 2}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on the old {_id: 2} that overlapped " +
- "with the chunk about to be copied! Test #1 failed.");
-
-recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'u', fromMigrate: true, 'o._id': 3}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for update op on {_id: 3}! Test #4 failed.");
-
-recipientOplogRes = recipientLocal.oplog.rs.find(
- {op: 'd', fromMigrate: true, 'o._id': 4}).count();
-assert.eq(1, recipientOplogRes, "fromMigrate flag wasn't set on the recipient shard's " +
- "oplog for delete op on {_id: 4} that occurred during " +
- "migration! Test #5 failed.");
-
-jsTest.log('DONE!');
-st.stop();
+ "use strict";
+
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+ /**
+ * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
+ */
+
+ var st = new ShardingTest({shards: 2, mongos: 1, rs: {nodes: 3}});
+ st.stopBalancer();
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
+ ns = dbName + ".foo", coll = mongos.getCollection(ns), donor = st.shard0,
+ recipient = st.shard1, donorColl = donor.getCollection(ns),
+ recipientColl = recipient.getCollection(ns), donorLocal = donor.getDB('local'),
+ recipientLocal = recipient.getDB('local');
+
+ // Two chunks
+ // Donor: [0, 2) [2, 5)
+ // Recipient:
+ jsTest.log('Enable sharding of the collection and pre-split into two chunks....');
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shards[0]._id);
+ assert.commandWorked(donorColl.createIndex({_id: 1}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 2}}));
+
+ // 6 documents,
+ // donor: 2 in the first chunk, 3 in the second.
+ // recipient: 1 document (shard key overlaps with a doc in second chunk of donor shard)
+ jsTest.log('Inserting 5 docs into donor shard, 1 doc into the recipient shard....');
+
+ for (var i = 0; i < 5; ++i)
+ assert.writeOK(coll.insert({_id: i}));
+ assert.eq(5, donorColl.count());
+
+ for (var i = 2; i < 3; ++i)
+ assert.writeOK(recipientColl.insert({_id: i}));
+ assert.eq(1, recipientColl.count());
+
+ /**
+ * Set failpoint: recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+ jsTest.log('setting recipient failpoint cloned');
+ pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+ /**
+ * Start moving chunk [2, 5) from donor shard to recipient shard, run in the background.
+ */
+
+ // Donor: [0, 2)
+ // Recipient: [2, 5)
+ jsTest.log('Starting chunk migration, pause after cloning...');
+
+ var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {_id: 2}, null, coll.getFullName(), shards[1]._id);
+
+ /**
+ * Wait for recipient to finish cloning.
+ * THEN update 1 document {_id: 3} on donor within the currently migrating chunk.
+ * AND delete 1 document {_id: 4} on donor within the currently migrating chunk.
+ */
+
+ waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+ jsTest.log('Update 1 doc and delete 1 doc on donor within the currently migrating chunk...');
+
+ assert.writeOK(coll.update({_id: 3}, {_id: 3, a: "updated doc"}));
+ assert.writeOK(coll.remove({_id: 4}));
+
+ /**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the transfer mods log from donor and finish migration.
+ */
+
+ jsTest.log('Continuing and finishing migration...');
+ unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ joinMoveChunk();
+
+ /**
+ * Check documents are where they should be: 2 docs in donor chunk, 2 docs in recipient chunk
+ * (because third doc in recipient shard's chunk got deleted on the donor shard during
+ * migration).
+ */
+
+ jsTest.log('Checking that documents are on the shards they should be...');
+
+ assert.eq(2, recipientColl.count(), "Recipient shard doesn't have exactly 2 documents!");
+ assert.eq(2, donorColl.count(), "Donor shard doesn't have exactly 2 documents!");
+ assert.eq(4, coll.count(), "Collection total is not 4!");
+
+ /**
+ * Check that the fromMigrate flag has been set correctly in donor and recipient oplogs.
+ */
+
+ jsTest.log('Checking donor and recipient oplogs for correct fromMigrate flags...');
+
+ var donorOplogRes = donorLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+ assert.eq(1,
+ donorOplogRes,
+ "fromMigrate flag wasn't set on the donor shard's oplog for " +
+ "migrating delete op on {_id: 2}! Test #2 failed.");
+
+ donorOplogRes =
+ donorLocal.oplog.rs.find({op: 'd', fromMigrate: {$exists: false}, 'o._id': 4}).count();
+ assert.eq(1,
+ donorOplogRes,
+ "Real delete of {_id: 4} on donor shard incorrectly set the " +
+ "fromMigrate flag in the oplog! Test #5 failed.");
+
+ var recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'i', fromMigrate: true, 'o._id': 2}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for migrating insert op on {_id: 2}! Test #3 failed.");
+
+ recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 2}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on the old {_id: 2} that overlapped " +
+ "with the chunk about to be copied! Test #1 failed.");
+
+ recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'u', fromMigrate: true, 'o._id': 3}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for update op on {_id: 3}! Test #4 failed.");
+
+ recipientOplogRes =
+ recipientLocal.oplog.rs.find({op: 'd', fromMigrate: true, 'o._id': 4}).count();
+ assert.eq(1,
+ recipientOplogRes,
+ "fromMigrate flag wasn't set on the recipient shard's " +
+ "oplog for delete op on {_id: 4} that occurred during " +
+ "migration! Test #5 failed.");
+
+ jsTest.log('DONE!');
+ st.stop();
})();
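The five fromMigrate assertions above repeat one query-and-count pattern against the shards' local oplogs. A sketch of a small helper that would express the same checks, assuming the donorLocal and recipientLocal handles from the test (illustrative only, not part of the patch):

    // Count oplog entries on a replica-set member that match the given predicate.
    function countOplog(localDB, query) {
        return localDB.oplog.rs.find(query).count();
    }
    // Hypothetical usage mirroring two of the assertions above:
    assert.eq(1, countOplog(donorLocal, {op: 'd', fromMigrate: true, 'o._id': 2}));
    assert.eq(1, countOplog(recipientLocal, {op: 'u', fromMigrate: true, 'o._id': 3}));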
diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js
index f518530c866..31b6fff75e9 100644
--- a/jstests/sharding/migration_with_source_ops.js
+++ b/jstests/sharding/migration_with_source_ops.js
@@ -18,135 +18,129 @@
load('./jstests/libs/chunk_manipulation_util.js');
(function() {
-"use strict";
-
-var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
-
-/**
- * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
- */
-
-var st = new ShardingTest({ shards : 2, mongos : 1 });
-st.stopBalancer();
-
-var mongos = st.s0,
- admin = mongos.getDB('admin'),
- shards = mongos.getCollection('config.shards').find().toArray(),
- dbName = "testDB",
- ns = dbName + ".foo",
- coll = mongos.getCollection(ns),
- donor = st.shard0,
- recipient = st.shard1,
- donorColl = donor.getCollection(ns),
- recipientColl = recipient.getCollection(ns);
-
-/**
- * Exable sharding, and split collection into two chunks.
- */
-
-// Two chunks
-// Donor: [0, 20) [20, 40)
-// Recipient:
-jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-st.ensurePrimaryShard(dbName, shards[0]._id);
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
-
-/**
- * Insert data into collection
- */
-
-// 10 documents in each chunk on the donor
-jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
-for (var i = 0; i < 10; ++i)
- assert.writeOK(coll.insert({a: i}));
-for (var i = 20; i < 30; ++i)
- assert.writeOK(coll.insert({a: i}));
-assert.eq(20, coll.count());
-
-/**
- * Set failpoints. Recipient will crash if an out of chunk range data op is
- * received from donor. Recipient will pause migration after cloning chunk data from donor,
- * before checking transfer mods log on donor.
- */
-
-jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
-assert.commandWorked(recipient.getDB('admin').runCommand(
+ "use strict";
+
+ var staticMongod = MongoRunner.runMongod({}); // For startParallelOps.
+
+ /**
+ * Start up new sharded cluster, stop balancer that would interfere in manual chunk management.
+ */
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+ st.stopBalancer();
+
+ var mongos = st.s0, admin = mongos.getDB('admin'),
+ shards = mongos.getCollection('config.shards').find().toArray(), dbName = "testDB",
+ ns = dbName + ".foo", coll = mongos.getCollection(ns), donor = st.shard0,
+ recipient = st.shard1, donorColl = donor.getCollection(ns),
+ recipientColl = recipient.getCollection(ns);
+
+ /**
+ * Enable sharding, and split the collection into two chunks.
+ */
+
+ // Two chunks
+ // Donor: [0, 20) [20, 40)
+ // Recipient:
+ jsTest.log('Enabling sharding of the collection and pre-splitting into two chunks....');
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ st.ensurePrimaryShard(dbName, shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {a: 1}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {a: 20}}));
+
+ /**
+ * Insert data into collection
+ */
+
+ // 10 documents in each chunk on the donor
+ jsTest.log('Inserting 20 docs into donor shard, 10 in each chunk....');
+ for (var i = 0; i < 10; ++i)
+ assert.writeOK(coll.insert({a: i}));
+ for (var i = 20; i < 30; ++i)
+ assert.writeOK(coll.insert({a: i}));
+ assert.eq(20, coll.count());
+
+ /**
+ * Set failpoints. Recipient will crash if an out of chunk range data op is
+ * received from donor. Recipient will pause migration after cloning chunk data from donor,
+ * before checking transfer mods log on donor.
+ */
+
+ jsTest.log('Setting failpoint failMigrationReceivedOutOfRangeOperation');
+ assert.commandWorked(recipient.getDB('admin').runCommand(
{configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'alwaysOn'}));
-jsTest.log('Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
-pauseMigrateAtStep(recipient, migrateStepNames.cloned);
-
-/**
- * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
- * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
- * cloning step (when it reaches the recipient failpoint).
- */
-
-// Donor: [0, 20)
-// Recipient: [20, 40)
-jsTest.log('Starting migration, pause after cloning...');
-var joinMoveChunk = moveChunkParallel(
- staticMongod,
- st.s0.host,
- {a: 20},
- null,
- coll.getFullName(),
- shards[1]._id);
-
-/**
- * Wait for recipient to finish cloning step.
- * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining chunk.
- * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining chunk.
- * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining chunk.
- *
- * This will populate the migration transfer mods log, which the recipient will collect when it
- * is unpaused.
- */
-
-waitForMigrateStep(recipient, migrateStepNames.cloned);
-
-jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
-assert.writeOK(coll.remove({$and : [ {a: {$gte: 5}}, {a: {$lt: 25}} ]}));
-
-jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.insert({a: 10}));
-assert.writeOK(coll.insert({a: 30}));
-
-jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
-assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
-assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
-
-/**
- * Finish migration. Unpause recipient migration, wait for it to collect
- * the new ops from the donor shard's migration transfer mods log, and finish.
- */
-
-jsTest.log('Continuing and finishing migration...');
-unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
-joinMoveChunk();
-
-/**
- * Check documents are where they should be: 6 docs in each shard's respective chunk.
- */
-
-jsTest.log('Checking that documents are on the shards they should be...');
-assert.eq(6, donorColl.count());
-assert.eq(6, recipientColl.count());
-assert.eq(12, coll.count());
-
-/**
- * Check that the updated documents are where they should be, one on each shard.
- */
-
-jsTest.log('Checking that documents were updated correctly...');
-var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
-assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
-var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
-assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
-
-jsTest.log('DONE!');
-st.stop();
+ jsTest.log(
+ 'Setting chunk migration recipient failpoint so that it pauses after bulk clone step');
+ pauseMigrateAtStep(recipient, migrateStepNames.cloned);
+
+ /**
+ * Start a moveChunk in the background. Move chunk [20, 40), which has 10 docs in the
+ * range, from shard 0 (donor) to shard 1 (recipient). Migration will pause after
+ * cloning step (when it reaches the recipient failpoint).
+ */
+
+ // Donor: [0, 20)
+ // Recipient: [20, 40)
+ jsTest.log('Starting migration, pause after cloning...');
+ var joinMoveChunk = moveChunkParallel(
+ staticMongod, st.s0.host, {a: 20}, null, coll.getFullName(), shards[1]._id);
+
+ /**
+ * Wait for recipient to finish cloning step.
+ * THEN delete 10 documents on the donor shard, 5 in the migrating chunk and 5 in the remaining
+ * chunk.
+ * AND insert 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ * chunk.
+ * AND update 2 documents on the donor shard, 1 in the migrating chunk and 1 in the remaining
+ * chunk.
+ *
+ * This will populate the migration transfer mods log, which the recipient will collect when it
+ * is unpaused.
+ */
+
+ waitForMigrateStep(recipient, migrateStepNames.cloned);
+
+ jsTest.log('Deleting 5 docs from each chunk, migrating chunk and remaining chunk...');
+ assert.writeOK(coll.remove({$and: [{a: {$gte: 5}}, {a: {$lt: 25}}]}));
+
+ jsTest.log('Inserting 1 in the migrating chunk range and 1 in the remaining chunk range...');
+ assert.writeOK(coll.insert({a: 10}));
+ assert.writeOK(coll.insert({a: 30}));
+
+ jsTest.log('Updating 1 in the migrating chunk range and 1 in the remaining chunk range...');
+ assert.writeOK(coll.update({a: 0}, {a: 0, updatedData: "updated"}));
+ assert.writeOK(coll.update({a: 25}, {a: 25, updatedData: "updated"}));
+
+ /**
+ * Finish migration. Unpause recipient migration, wait for it to collect
+ * the new ops from the donor shard's migration transfer mods log, and finish.
+ */
+
+ jsTest.log('Continuing and finishing migration...');
+ unpauseMigrateAtStep(recipient, migrateStepNames.cloned);
+ joinMoveChunk();
+
+ /**
+ * Check documents are where they should be: 6 docs in each shard's respective chunk.
+ */
+
+ jsTest.log('Checking that documents are on the shards they should be...');
+ assert.eq(6, donorColl.count());
+ assert.eq(6, recipientColl.count());
+ assert.eq(12, coll.count());
+
+ /**
+ * Check that the updated documents are where they should be, one on each shard.
+ */
+
+ jsTest.log('Checking that documents were updated correctly...');
+ var donorCollUpdatedNum = donorColl.find({updatedData: "updated"}).count();
+ assert.eq(1, donorCollUpdatedNum, "Update failed on donor shard during migration!");
+ var recipientCollUpdatedNum = recipientColl.find({updatedData: "updated"}).count();
+ assert.eq(1, recipientCollUpdatedNum, "Update failed on recipient shard during migration!");
+
+ jsTest.log('DONE!');
+ st.stop();
})();
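The test above turns on the failMigrationReceivedOutOfRangeOperation failpoint and leaves it set until the cluster is torn down. A sketch of an explicit cleanup step, reusing the same configureFailPoint command the test already issues; only the 'off' call and its placement before st.stop() are assumptions, not part of the patch:

    // Disable the recipient failpoint once the migration has completed.
    assert.commandWorked(recipient.getDB('admin').runCommand(
        {configureFailPoint: 'failMigrationReceivedOutOfRangeOperation', mode: 'off'}));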
diff --git a/jstests/sharding/min_optime_recovery.js b/jstests/sharding/min_optime_recovery.js
index b3e1b1c45cc..d77f1e2ad42 100644
--- a/jstests/sharding/min_optime_recovery.js
+++ b/jstests/sharding/min_optime_recovery.js
@@ -7,81 +7,81 @@
* @tags: [requires_persistence]
*/
(function() {
-"use strict";
+ "use strict";
-var runTest = function(withRecovery) {
- var st = new ShardingTest({ shards: 2 });
+ var runTest = function(withRecovery) {
+ var st = new ShardingTest({shards: 2});
- var testDB = st.s.getDB('test');
- testDB.adminCommand({ enableSharding: 'test' });
- st.ensurePrimaryShard('test', 'shard0000');
- testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ st.ensurePrimaryShard('test', 'shard0000');
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
- var opTimeBeforeMigrate = null;
- if (st.configRS) {
- var priConn = st.configRS.getPrimary();
- var replStatus = priConn.getDB('admin').runCommand({ replSetGetStatus: 1 });
- replStatus.members.forEach(function(memberState) {
- if (memberState.state == 1) { // if primary
- opTimeBeforeMigrate = memberState.optime;
+ var opTimeBeforeMigrate = null;
+ if (st.configRS) {
+ var priConn = st.configRS.getPrimary();
+ var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1});
+ replStatus.members.forEach(function(memberState) {
+ if (memberState.state == 1) { // if primary
+ opTimeBeforeMigrate = memberState.optime;
- assert.neq(null, opTimeBeforeMigrate);
- assert.neq(null, opTimeBeforeMigrate.ts);
- assert.neq(null, opTimeBeforeMigrate.t);
- }
- });
- }
+ assert.neq(null, opTimeBeforeMigrate);
+ assert.neq(null, opTimeBeforeMigrate.ts);
+ assert.neq(null, opTimeBeforeMigrate.t);
+ }
+ });
+ }
- testDB.adminCommand({ moveChunk: 'test.user', find: { x: 0 }, to: 'shard0001' });
+ testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0001'});
- var shardAdmin = st.d0.getDB('admin');
- var doc = shardAdmin.system.version.findOne();
+ var shardAdmin = st.d0.getDB('admin');
+ var doc = shardAdmin.system.version.findOne();
- if (st.configRS) {
- assert.neq(null, doc);
- assert.eq('minOpTimeRecovery', doc._id);
- assert.eq(st.configRS.getURL(), doc.configsvrConnectionString);
- assert.eq('shard0000', doc.shardName);
- assert.gt(doc.minOpTime.ts.getTime(), 0);
- }
- else {
- assert.eq(null, doc);
- }
+ if (st.configRS) {
+ assert.neq(null, doc);
+ assert.eq('minOpTimeRecovery', doc._id);
+ assert.eq(st.configRS.getURL(), doc.configsvrConnectionString);
+ assert.eq('shard0000', doc.shardName);
+ assert.gt(doc.minOpTime.ts.getTime(), 0);
+ } else {
+ assert.eq(null, doc);
+ }
- var restartCmdLineOptions = Object.merge(st.d0.fullOptions, {
- setParameter: 'recoverShardingState=' + (withRecovery? 'true' : 'false'),
- restart: true
- });
+ var restartCmdLineOptions = Object.merge(
+ st.d0.fullOptions,
+ {
+ setParameter: 'recoverShardingState=' + (withRecovery ? 'true' : 'false'),
+ restart: true
+ });
- // Restart the shard that donated a chunk to trigger the optime recovery logic.
- st.stopMongod(0);
- var newMongod = MongoRunner.runMongod(restartCmdLineOptions);
- var shardingSection = newMongod.getDB('admin').runCommand({ serverStatus: 1 }).sharding;
+ // Restart the shard that donated a chunk to trigger the optime recovery logic.
+ st.stopMongod(0);
+ var newMongod = MongoRunner.runMongod(restartCmdLineOptions);
+ var shardingSection = newMongod.getDB('admin').runCommand({serverStatus: 1}).sharding;
- if (st.configRS && withRecovery) {
- assert.neq(null, shardingSection);
+ if (st.configRS && withRecovery) {
+ assert.neq(null, shardingSection);
- // Confirm that the config server string points to an actual config server replica set.
- var configConnStr = shardingSection.configsvrConnectionString;
- var configConn = new Mongo(configConnStr);
- var configIsMaster = configConn.getDB('admin').runCommand({ isMaster: 1 });
- assert.gt(configConnStr.indexOf('/'), 0);
- assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
+ // Confirm that the config server string points to an actual config server replica set.
+ var configConnStr = shardingSection.configsvrConnectionString;
+ var configConn = new Mongo(configConnStr);
+ var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
+ assert.gt(configConnStr.indexOf('/'), 0);
+ assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
- var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
- assert.neq(null, configOpTimeObj);
- assert.gte(configOpTimeObj.ts.getTime(), opTimeBeforeMigrate.ts.getTime());
- assert.gte(configOpTimeObj.t, opTimeBeforeMigrate.t);
- }
- else {
- assert.eq(null, shardingSection);
- }
+ var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
+ assert.neq(null, configOpTimeObj);
+ assert.gte(configOpTimeObj.ts.getTime(), opTimeBeforeMigrate.ts.getTime());
+ assert.gte(configOpTimeObj.t, opTimeBeforeMigrate.t);
+ } else {
+ assert.eq(null, shardingSection);
+ }
- MongoRunner.stopMongod(newMongod.port);
- st.stop();
-};
+ MongoRunner.stopMongod(newMongod.port);
+ st.stop();
+ };
-runTest(true);
-runTest(false);
+ runTest(true);
+ runTest(false);
})();
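The recovery document inspected by min_optime_recovery.js lives in admin.system.version on the donor shard. A sketch of fetching it explicitly by _id rather than with a bare findOne(), using the shardAdmin handle from the test (illustrative only; only the fields the test asserts on are listed):

    // Fetch the sharding recovery document by its well-known _id.
    var recoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'});
    // Fields exercised by the assertions above: configsvrConnectionString, shardName, minOpTime.ts.
    printjson(recoveryDoc);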
diff --git a/jstests/sharding/missing_key.js b/jstests/sharding/missing_key.js
index 2eebc0d0912..588d85e1a95 100644
--- a/jstests/sharding/missing_key.js
+++ b/jstests/sharding/missing_key.js
@@ -1,40 +1,39 @@
// Test that the shardCollection command fails when a preexisting document lacks a shard key field.
// SERVER-8772
-var st = new ShardingTest( { shards: 1 } );
+var st = new ShardingTest({shards: 1});
st.stopBalancer();
-var db = st.s.getDB( 'testDb' );
+var db = st.s.getDB('testDb');
var coll = db.testColl;
-coll.insert( { x:1, z:1 } );
-coll.insert( { y:1, z:1 } );
-db.adminCommand( { enableSharding:'testDb' } );
+coll.insert({x: 1, z: 1});
+coll.insert({y: 1, z: 1});
+db.adminCommand({enableSharding: 'testDb'});
/**
* Assert that the shardCollection command fails, with a preexisting index on the provided
* 'shardKey'.
*/
-function assertInvalidShardKey( shardKey ) {
-
+function assertInvalidShardKey(shardKey) {
// Manually create a shard key index.
coll.dropIndexes();
- coll.ensureIndex( shardKey );
+ coll.ensureIndex(shardKey);
// Ensure that the shard key index identifies 'x' as present in one document and absent in the
// other.
- assert.eq( 1, coll.find( { x:1 } ).hint( shardKey ).itcount() );
- assert.eq( 1, coll.find( { x:{ $exists:false } } ).hint( shardKey ).itcount() );
+ assert.eq(1, coll.find({x: 1}).hint(shardKey).itcount());
+ assert.eq(1, coll.find({x: {$exists: false}}).hint(shardKey).itcount());
// Assert that the shardCollection command fails with the provided 'shardKey'.
- assert.commandFailed( db.adminCommand( { shardCollection:'testDb.testColl', key:shardKey } ),
- 'shardCollection should have failed on key ' + tojson( shardKey ) );
+ assert.commandFailed(db.adminCommand({shardCollection: 'testDb.testColl', key: shardKey}),
+ 'shardCollection should have failed on key ' + tojson(shardKey));
}
// Test single, compound, and hashed shard keys.
-assertInvalidShardKey( { x:1 } );
-assertInvalidShardKey( { x:1, y:1 } );
-assertInvalidShardKey( { y:1, x:1 } );
-assertInvalidShardKey( { x:'hashed' } );
+assertInvalidShardKey({x: 1});
+assertInvalidShardKey({x: 1, y: 1});
+assertInvalidShardKey({y: 1, x: 1});
+assertInvalidShardKey({x: 'hashed'});
st.stop();
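missing_key.js only exercises the failure path. A sketch of the complementary positive case, under the assumptions that it runs before st.stop(), that enableSharding succeeded, and that a plain {x: 1} index alongside the hashed index left by the last assertInvalidShardKey call satisfies the shard key requirement (not part of the patch):

    // Give the second document a shard key value, build a matching index, and expect success.
    assert.writeOK(coll.update({y: 1}, {$set: {x: 2}}));
    assert.commandWorked(coll.createIndex({x: 1}));
    assert.commandWorked(db.adminCommand({shardCollection: 'testDb.testColl', key: {x: 1}}));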
diff --git a/jstests/sharding/mongos_no_detect_sharding.js b/jstests/sharding/mongos_no_detect_sharding.js
index 1e5c8832ac0..608b2ca0bdb 100644
--- a/jstests/sharding/mongos_no_detect_sharding.js
+++ b/jstests/sharding/mongos_no_detect_sharding.js
@@ -1,44 +1,42 @@
// Tests whether new sharding is detected on insert by mongos
(function() {
-var st = new ShardingTest({ name: "mongos_no_detect_sharding",
- shards: 1,
- mongos: 2 });
+ var st = new ShardingTest({name: "mongos_no_detect_sharding", shards: 1, mongos: 2});
-var mongos = st.s;
-var config = mongos.getDB("config");
+ var mongos = st.s;
+ var config = mongos.getDB("config");
-print( "Creating unsharded connection..." );
+ print("Creating unsharded connection...");
-var mongos2 = st._mongos[1];
+ var mongos2 = st._mongos[1];
-var coll = mongos2.getCollection( "test.foo" );
-coll.insert({ i : 0 });
+ var coll = mongos2.getCollection("test.foo");
+ coll.insert({i: 0});
-print( "Sharding collection..." );
+ print("Sharding collection...");
-var admin = mongos.getDB("admin");
+ var admin = mongos.getDB("admin");
-assert.eq( coll.getShardVersion().ok, 0 );
+ assert.eq(coll.getShardVersion().ok, 0);
-admin.runCommand({ enableSharding : "test" });
-admin.runCommand({ shardCollection : "test.foo", key : { _id : 1 } });
+ admin.runCommand({enableSharding: "test"});
+ admin.runCommand({shardCollection: "test.foo", key: {_id: 1}});
-print( "Seeing if data gets inserted unsharded..." );
-print( "No splits occur here!" );
+ print("Seeing if data gets inserted unsharded...");
+ print("No splits occur here!");
-// Insert a bunch of data which should trigger a split
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 100; i++ ){
- bulk.insert({ i : i + 1 });
-}
-assert.writeOK(bulk.execute());
+ // Insert a bunch of data which should trigger a split
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ bulk.insert({i: i + 1});
+ }
+ assert.writeOK(bulk.execute());
-st.printShardingStatus( true );
+ st.printShardingStatus(true);
-assert.eq( coll.getShardVersion().ok, 1 );
-assert.eq( 101, coll.find().itcount() );
+ assert.eq(coll.getShardVersion().ok, 1);
+ assert.eq(101, coll.find().itcount());
-st.stop();
+ st.stop();
})();
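The restyled test still issues enableSharding and shardCollection without checking their results. A sketch of the guarded form, using the same commands and the admin handle already defined in the test (an editorial suggestion, not part of the patch):

    // Fail fast if sharding setup does not succeed, instead of surfacing later as a
    // confusing shard-version or itcount assertion.
    assert.commandWorked(admin.runCommand({enableSharding: "test"}));
    assert.commandWorked(admin.runCommand({shardCollection: "test.foo", key: {_id: 1}}));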
diff --git a/jstests/sharding/mongos_no_replica_set_refresh.js b/jstests/sharding/mongos_no_replica_set_refresh.js
index 48aa3ca27f9..3d9af893b55 100644
--- a/jstests/sharding/mongos_no_replica_set_refresh.js
+++ b/jstests/sharding/mongos_no_replica_set_refresh.js
@@ -1,92 +1,100 @@
// Tests whether new sharding is detected on insert by mongos
load("jstests/replsets/rslib.js");
-(function () {
-'use strict';
-
-var st = new ShardingTest({ name: 'mongos_no_replica_set_refresh',
- shards: 1,
- mongos: 1,
- other: {
- rs0: {
- nodes: [
- {},
- {rsConfig: {priority: 0}},
- {rsConfig: {priority: 0}},
- ],
- }
- } });
-
-var rsObj = st.rs0;
-assert.commandWorked(
- rsObj.nodes[0].adminCommand({
+(function() {
+ 'use strict';
+
+ var st = new ShardingTest({
+ name: 'mongos_no_replica_set_refresh',
+ shards: 1,
+ mongos: 1,
+ other: {
+ rs0: {
+ nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}],
+ }
+ }
+ });
+
+ var rsObj = st.rs0;
+ assert.commandWorked(rsObj.nodes[0].adminCommand({
replSetTest: 1,
waitForMemberState: ReplSetTest.State.PRIMARY,
timeoutMillis: 60 * 1000,
}),
- 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary'
-);
+ 'node 0 ' + rsObj.nodes[0].host + ' failed to become primary');
-var mongos = st.s;
-var config = mongos.getDB("config");
+ var mongos = st.s;
+ var config = mongos.getDB("config");
-printjson( mongos.getCollection("foo.bar").findOne() );
+ printjson(mongos.getCollection("foo.bar").findOne());
-jsTestLog( "Reconfiguring replica set..." );
+ jsTestLog("Reconfiguring replica set...");
-var rsConfig = rsObj.getReplSetConfigFromNode(0);
+ var rsConfig = rsObj.getReplSetConfigFromNode(0);
-// Now remove the last node in the config.
-var removedNode = rsConfig.members.pop();
-rsConfig.version++;
-reconfig(rsObj, rsConfig);
+ // Now remove the last node in the config.
+ var removedNode = rsConfig.members.pop();
+ rsConfig.version++;
+ reconfig(rsObj, rsConfig);
-// Wait for the election round to complete
-rsObj.getPrimary();
+ // Wait for the election round to complete
+ rsObj.getPrimary();
-var numRSHosts = function(){
- var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster : 1}));
- jsTestLog('Nodes in ' + rsObj.name + ': ' + tojson(result));
- return result.hosts.length + result.passives.length;
-};
+ var numRSHosts = function() {
+ var result = assert.commandWorked(rsObj.nodes[0].adminCommand({ismaster: 1}));
+ jsTestLog('Nodes in ' + rsObj.name + ': ' + tojson(result));
+ return result.hosts.length + result.passives.length;
+ };
-assert.soon( function(){ return numRSHosts() < 3; } );
+ assert.soon(function() {
+ return numRSHosts() < 3;
+ });
-var numMongosHosts = function(){
- var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
- var result = commandResult.replicaSets[rsObj.name];
- jsTestLog('Nodes in ' + rsObj.name + ' connected to mongos: ' + tojson(result));
- return result.hosts.length;
-};
+ var numMongosHosts = function() {
+ var commandResult = assert.commandWorked(mongos.adminCommand("connPoolStats"));
+ var result = commandResult.replicaSets[rsObj.name];
+ jsTestLog('Nodes in ' + rsObj.name + ' connected to mongos: ' + tojson(result));
+ return result.hosts.length;
+ };
-// Wait for ReplicaSetMonitor to refresh; it should discover that the set now has only 2 hosts.
-assert.soon( function(){ return numMongosHosts() < 3; } );
+ // Wait for ReplicaSetMonitor to refresh; it should discover that the set now has only 2 hosts.
+ assert.soon(function() {
+ return numMongosHosts() < 3;
+ });
-jsTestLog( "Mongos successfully detected change..." );
+ jsTestLog("Mongos successfully detected change...");
-var configServerURL = function(){
- var result = config.shards.find().toArray()[0];
- printjson( result );
- return result.host;
-};
+ var configServerURL = function() {
+ var result = config.shards.find().toArray()[0];
+ printjson(result);
+ return result.host;
+ };
-assert.soon( function(){ return configServerURL().indexOf( removedNode.host ) < 0; } );
+ assert.soon(function() {
+ return configServerURL().indexOf(removedNode.host) < 0;
+ });
-jsTestLog( "Now test adding new replica set servers..." );
+ jsTestLog("Now test adding new replica set servers...");
-config.shards.update({ _id : rsObj.name }, { $set : { host : rsObj.name + "/" + rsObj.nodes[0].host } });
-printjson( config.shards.find().toArray() );
+ config.shards.update({_id: rsObj.name}, {$set: {host: rsObj.name + "/" + rsObj.nodes[0].host}});
+ printjson(config.shards.find().toArray());
-rsConfig.members.push(removedNode);
-rsConfig.version++;
-reconfig(rsObj, rsConfig);
+ rsConfig.members.push(removedNode);
+ rsConfig.version++;
+ reconfig(rsObj, rsConfig);
-assert.soon( function(){ return numRSHosts() > 2; } );
+ assert.soon(function() {
+ return numRSHosts() > 2;
+ });
-assert.soon( function(){ return numMongosHosts() > 2; } );
+ assert.soon(function() {
+ return numMongosHosts() > 2;
+ });
-assert.soon( function(){ return configServerURL().indexOf( removedNode.host ) >= 0; } );
+ assert.soon(function() {
+ return configServerURL().indexOf(removedNode.host) >= 0;
+ });
-st.stop();
+ st.stop();
}());
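The assert.soon calls above poll with the default timeout and no message. A sketch of the same wait with an explicit message and timeout, using the numMongosHosts helper and removedNode from the test; the five-minute figure is an arbitrary illustration, not taken from the patch:

    // Name the condition and bound the wait so a hang is easy to spot in the logs.
    assert.soon(function() {
        return numMongosHosts() > 2;
    }, 'mongos never rediscovered ' + removedNode.host, 5 * 60 * 1000);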
diff --git a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
index f4dd12b0ecc..8eaf9653f11 100644
--- a/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js
@@ -11,48 +11,47 @@
// (connection connected after shard change).
//
-var options = { rs : true,
- rsOptions : { nodes : 2 },
- keyFile : "jstests/libs/key1" };
+var options = {
+ rs: true,
+ rsOptions: {nodes: 2},
+ keyFile: "jstests/libs/key1"
+};
-var st = new ShardingTest({shards : 3, mongos : 1, other : options});
+var st = new ShardingTest({shards: 3, mongos: 1, other: options});
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
+var admin = mongos.getDB("admin");
jsTest.log("Setting up initial admin user...");
var adminUser = "adminUser";
var password = "password";
// Create a user
-admin.createUser({ user : adminUser, pwd : password, roles: [ "root" ] });
+admin.createUser({user: adminUser, pwd: password, roles: ["root"]});
// There's an admin user now, so we need to login to do anything
// Login as admin user
admin.auth(adminUser, password);
st.stopBalancer();
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked( admin.runCommand({ setParameter : 1, traceExceptions : true }) );
+assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
-var collSharded = mongos.getCollection( "fooSharded.barSharded" );
-var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" );
+var collSharded = mongos.getCollection("fooSharded.barSharded");
+var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
// Create the unsharded database with shard0 primary
-assert.writeOK(collUnsharded.insert({ some : "doc" }));
+assert.writeOK(collUnsharded.insert({some: "doc"}));
assert.writeOK(collUnsharded.remove({}));
-printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(),
- to : shards[0]._id }) );
+printjson(admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: shards[0]._id}));
// Create the sharded database with shard1 primary
-assert.commandWorked( admin.runCommand({ enableSharding : collSharded.getDB().toString() }) );
-printjson( admin.runCommand({ movePrimary : collSharded.getDB().toString(), to : shards[1]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : collSharded.toString(),
- key : { _id : 1 } }) );
-assert.commandWorked( admin.runCommand({ split : collSharded.toString(), middle : { _id : 0 } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(),
- find : { _id : -1 },
- to : shards[0]._id }) );
+assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: shards[1]._id}));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: -1}, to: shards[0]._id}));
st.printShardingStatus();
var shardedDBUser = "shardedDBUser";
@@ -61,16 +60,14 @@ var unshardedDBUser = "unshardedDBUser";
jsTest.log("Setting up database users...");
// Create db users
-collSharded.getDB().createUser({ user : shardedDBUser,
- pwd : password, roles : [ "readWrite" ] });
-collUnsharded.getDB().createUser({ user : unshardedDBUser,
- pwd : password, roles : [ "readWrite" ] });
+collSharded.getDB().createUser({user: shardedDBUser, pwd: password, roles: ["readWrite"]});
+collUnsharded.getDB().createUser({user: unshardedDBUser, pwd: password, roles: ["readWrite"]});
admin.logout();
-function authDBUsers( conn ) {
- conn.getDB( collSharded.getDB().toString() ).auth(shardedDBUser, password);
- conn.getDB( collUnsharded.getDB().toString() ).auth(unshardedDBUser, password);
+function authDBUsers(conn) {
+ conn.getDB(collSharded.getDB().toString()).auth(shardedDBUser, password);
+ conn.getDB(collUnsharded.getDB().toString()).auth(unshardedDBUser, password);
return conn;
}
@@ -80,65 +77,67 @@ function authDBUsers( conn ) {
jsTest.log("Inserting initial data...");
-var mongosConnActive = authDBUsers( new Mongo( mongos.host ) );
+var mongosConnActive = authDBUsers(new Mongo(mongos.host));
authDBUsers(mongosConnActive);
var mongosConnIdle = null;
var mongosConnNew = null;
-var wc = {writeConcern: {w: 2, wtimeout: 60000}};
+var wc = {
+ writeConcern: {w: 2, wtimeout: 60000}
+};
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
jsTest.log("Stopping primary of third shard...");
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
st.rs2.stop(st.rs2.getPrimary());
jsTest.log("Testing active connection with third primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with third primary down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("Stopping primary of second shard...");
mongosConnActive.setSlaveOk();
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
mongosConnIdle.setSlaveOk();
// Need to save this node for later
@@ -148,142 +147,137 @@ st.rs1.stop(st.rs1.getPrimary());
jsTest.log("Testing active connection with second primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }, wc));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }, wc));
+assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
+assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
jsTest.log("Testing idle connection with second primary down...");
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }, wc));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }, wc));
+assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
+assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with second primary down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }, wc));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("Stopping primary of first shard...");
mongosConnActive.setSlaveOk();
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
mongosConnIdle.setSlaveOk();
st.rs0.stop(st.rs0.getPrimary());
jsTest.log("Testing active connection with first primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
jsTest.log("Testing idle connection with first primary down...");
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with first primary down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("Stopping second shard...");
mongosConnActive.setSlaveOk();
-mongosConnIdle = authDBUsers( new Mongo( mongos.host ) );
+mongosConnIdle = authDBUsers(new Mongo(mongos.host));
mongosConnIdle.setSlaveOk();
st.rs1.stop(rs1Secondary);
jsTest.log("Testing active connection with second shard down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
+assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
+assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
jsTest.log("Testing idle connection with second shard down...");
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
+assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
+assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
jsTest.log("Testing new connections with second shard down...");
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
+assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }));
-mongosConnNew = authDBUsers( new Mongo( mongos.host ) );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
+mongosConnNew = authDBUsers(new Mongo(mongos.host));
+assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
-gc(); // Clean up new connections
+gc(); // Clean up new connections
jsTest.log("DONE!");
st.stop();
-
-
-
-
-
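The hunks above belong to the auth-enabled variant of the failure-tolerance test: each block repeats the same pattern of stopping a primary (or an entire shard) and then checking that reads through mongos keep working against the surviving replica-set members while writes routed to the affected shard fail. A minimal sketch of that recurring check, assuming a ShardingTest `st` started with replica-set shards, the test's `authDBUsers()` login helper, and a sharded collection `coll` split at `{_id: 0}` with the positive chunk living on the shard being stopped:

    var conn = authDBUsers(new Mongo(st.s0.host));  // fresh authenticated mongos connection
    conn.setSlaveOk();                              // permit reads from secondaries
    st.rs1.stop(st.rs1.getPrimary());               // take down one shard's primary

    // Reads still succeed against the remaining replica-set members...
    assert.neq(null, conn.getCollection(coll.toString()).findOne({_id: -1}));
    assert.neq(null, conn.getCollection(coll.toString()).findOne({_id: 1}));

    // ...while writes that must reach the downed primary fail, and writes
    // targeting the healthy shard keep succeeding.
    assert.writeError(conn.getCollection(coll.toString()).insert({_id: 1000}));
    assert.writeOK(conn.getCollection(coll.toString()).insert({_id: -1000}));

The `coll` name and the `{_id: 1000}` / `{_id: -1000}` documents are illustrative stand-ins; the committed test uses `collSharded`/`collUnsharded` and a running sequence of `_id` values, as shown in the diff.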
diff --git a/jstests/sharding/mongos_rs_shard_failure_tolerance.js b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
index b5117439925..e24566605ce 100644
--- a/jstests/sharding/mongos_rs_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_rs_shard_failure_tolerance.js
@@ -11,406 +11,406 @@
// (connection connected after shard change).
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 3, mongos: 1, other: { rs: true, rsOptions: { nodes: 2 } } });
+ var st = new ShardingTest({shards: 3, mongos: 1, other: {rs: true, rsOptions: {nodes: 2}}});
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getDB( "config" ).shards.find().toArray();
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getDB("config").shards.find().toArray();
-assert.commandWorked( admin.runCommand({ setParameter : 1, traceExceptions : true }) );
+ assert.commandWorked(admin.runCommand({setParameter: 1, traceExceptions: true}));
-var collSharded = mongos.getCollection( "fooSharded.barSharded" );
-var collUnsharded = mongos.getCollection( "fooUnsharded.barUnsharded" );
+ var collSharded = mongos.getCollection("fooSharded.barSharded");
+ var collUnsharded = mongos.getCollection("fooUnsharded.barUnsharded");
-// Create the unsharded database
-assert.writeOK(collUnsharded.insert({ some : "doc" }));
-assert.writeOK(collUnsharded.remove({}));
-printjson( admin.runCommand({ movePrimary : collUnsharded.getDB().toString(),
- to : shards[0]._id }) );
+ // Create the unsharded database
+ assert.writeOK(collUnsharded.insert({some: "doc"}));
+ assert.writeOK(collUnsharded.remove({}));
+ printjson(admin.runCommand({movePrimary: collUnsharded.getDB().toString(), to: shards[0]._id}));
-// Create the sharded database
-assert.commandWorked( admin.runCommand({ enableSharding : collSharded.getDB().toString() }) );
-printjson( admin.runCommand({ movePrimary : collSharded.getDB().toString(), to : shards[0]._id }) );
-assert.commandWorked( admin.runCommand({ shardCollection : collSharded.toString(),
- key : { _id : 1 } }) );
-assert.commandWorked( admin.runCommand({ split : collSharded.toString(), middle : { _id : 0 } }) );
-assert.commandWorked( admin.runCommand({ moveChunk : collSharded.toString(),
- find : { _id : 0 },
- to : shards[1]._id }) );
+ // Create the sharded database
+ assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+ printjson(admin.runCommand({movePrimary: collSharded.getDB().toString(), to: shards[0]._id}));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[1]._id}));
-st.printShardingStatus();
+ st.printShardingStatus();
-//
-// Setup is complete
-//
+ //
+ // Setup is complete
+ //
-jsTest.log("Inserting initial data...");
+ jsTest.log("Inserting initial data...");
-var mongosConnActive = new Mongo( mongos.host );
-var mongosConnIdle = null;
-var mongosConnNew = null;
+ var mongosConnActive = new Mongo(mongos.host);
+ var mongosConnIdle = null;
+ var mongosConnNew = null;
-var wc = {writeConcern: {w: 2, wtimeout: 60000}};
+ var wc = {
+ writeConcern: {w: 2, wtimeout: 60000}
+ };
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 1 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 1 }, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}, wc));
-jsTest.log("Stopping primary of third shard...");
+ jsTest.log("Stopping primary of third shard...");
-mongosConnIdle = new Mongo( mongos.host );
+ mongosConnIdle = new Mongo(mongos.host);
-st.rs2.stop(st.rs2.getPrimary());
+ st.rs2.stop(st.rs2.getPrimary());
-jsTest.log("Testing active connection with third primary down...");
+ jsTest.log("Testing active connection with third primary down...");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 2 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 2 }, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}, wc));
-jsTest.log("Testing idle connection with third primary down...");
+ jsTest.log("Testing idle connection with third primary down...");
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 3 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 3 }, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}, wc));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections with third primary down...");
+ jsTest.log("Testing new connections with third primary down...");
-mongosConnNew = new Mongo( mongos.host );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = new Mongo( mongos.host );
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -4 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 4 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 4 }, wc));
-
-gc(); // Clean up new connections
-
-jsTest.log("Stopping primary of second shard...");
-
-mongosConnIdle = new Mongo( mongos.host );
-
-// Need to save this node for later
-var rs1Secondary = st.rs1.getSecondary();
-
-st.rs1.stop(st.rs1.getPrimary());
-
-jsTest.log("Testing active connection with second primary down...");
-
-// Reads with read prefs
-mongosConnActive.setSlaveOk();
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.setSlaveOk(false);
-
-mongosConnActive.setReadPref("primary");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-//Ensure read prefs override slaveOK
-mongosConnActive.setSlaveOk();
-mongosConnActive.setReadPref("primary");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnActive.setSlaveOk(false);
-
-mongosConnActive.setReadPref("secondary");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnActive.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnActive.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnActive.setReadPref("nearest");
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-// Writes
-assert.writeOK(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -5 }, wc));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 5 }, wc));
-assert.writeOK(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 5 }, wc));
-
-jsTest.log("Testing idle connection with second primary down...");
-
-// Writes
-assert.writeOK(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -6 }, wc));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 6 }, wc));
-assert.writeOK(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 6 }, wc));
-
-// Reads with read prefs
-mongosConnIdle.setSlaveOk();
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-mongosConnIdle.setSlaveOk(false);
-
-mongosConnIdle.setReadPref("primary");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-// Ensure read prefs override slaveOK
-mongosConnIdle.setSlaveOk();
-mongosConnIdle.setReadPref("primary");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.throws(function() {
- mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-mongosConnIdle.setSlaveOk(false);
-
-mongosConnIdle.setReadPref("secondary");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnIdle.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnIdle.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-mongosConnIdle.setReadPref("nearest");
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-jsTest.log("Testing new connections with second primary down...");
-
-// Reads with read prefs
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primary");
-assert.throws(function() {
- mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-// Ensure read prefs override slaveok
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-mongosConnNew.setReadPref("primary");
-assert.throws(function() {
- mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 });
-});
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-mongosConnNew.setReadPref("primary");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondary");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondary");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("primaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("secondaryPreferred");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("nearest");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("nearest");
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setReadPref("nearest");
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
-
-// Writes
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -7 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 7 }, wc));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeOK(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 7 }, wc));
-
-gc(); // Clean up new connections
-
-jsTest.log("Stopping primary of first shard...");
-
-mongosConnIdle = new Mongo( mongos.host );
-
-st.rs0.stop(st.rs0.getPrimary());
-
-jsTest.log("Testing active connection with first primary down...");
-
-mongosConnActive.setSlaveOk();
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : 1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
-
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -8 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 8 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 8 }));
-
-jsTest.log("Testing idle connection with first primary down...");
-
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -9 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 9 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 9 }));
-
-mongosConnIdle.setSlaveOk();
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
-
-jsTest.log("Testing new connections with first primary down...");
-
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : 1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ mongosConnNew = new Mongo(mongos.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}, wc));
+
+ gc(); // Clean up new connections
+
+ jsTest.log("Stopping primary of second shard...");
+
+ mongosConnIdle = new Mongo(mongos.host);
+
+ // Need to save this node for later
+ var rs1Secondary = st.rs1.getSecondary();
+
+ st.rs1.stop(st.rs1.getPrimary());
+
+ jsTest.log("Testing active connection with second primary down...");
+
+ // Reads with read prefs
+ mongosConnActive.setSlaveOk();
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnActive.setSlaveOk(false);
+
+ mongosConnActive.setReadPref("primary");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ // Ensure read prefs override slaveOK
+ mongosConnActive.setSlaveOk();
+ mongosConnActive.setReadPref("primary");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnActive.setSlaveOk(false);
+
+ mongosConnActive.setReadPref("secondary");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnActive.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnActive.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnActive.setReadPref("nearest");
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ // Writes
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}, wc));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}, wc));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}, wc));
+
+ jsTest.log("Testing idle connection with second primary down...");
+
+ // Writes
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}, wc));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}, wc));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}, wc));
+
+ // Reads with read prefs
+ mongosConnIdle.setSlaveOk();
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnIdle.setSlaveOk(false);
+
+ mongosConnIdle.setReadPref("primary");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ // Ensure read prefs override slaveOK
+ mongosConnIdle.setSlaveOk();
+ mongosConnIdle.setReadPref("primary");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.throws(function() {
+ mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+ mongosConnIdle.setSlaveOk(false);
+
+ mongosConnIdle.setReadPref("secondary");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnIdle.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnIdle.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ mongosConnIdle.setReadPref("nearest");
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ jsTest.log("Testing new connections with second primary down...");
+
+ // Reads with read prefs
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primary");
+ assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ // Ensure read prefs override slaveok
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ mongosConnNew.setReadPref("primary");
+ assert.throws(function() {
+ mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1});
+ });
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ mongosConnNew.setReadPref("primary");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondary");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondary");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("primaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("secondaryPreferred");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("nearest");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("nearest");
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setReadPref("nearest");
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ gc(); // Clean up new connections incrementally to compensate for slow win32 machine.
+
+ // Writes
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}, wc));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}, wc));
+
+ gc(); // Clean up new connections
+
+ jsTest.log("Stopping primary of first shard...");
+
+ mongosConnIdle = new Mongo(mongos.host);
+
+ st.rs0.stop(st.rs0.getPrimary());
+
+ jsTest.log("Testing active connection with first primary down...");
+
+ mongosConnActive.setSlaveOk();
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -8}));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 8}));
+ assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 8}));
+
+ jsTest.log("Testing idle connection with first primary down...");
+
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -9}));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 9}));
+ assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 9}));
+
+ mongosConnIdle.setSlaveOk();
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
+
+ jsTest.log("Testing new connections with first primary down...");
+
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -10 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 10 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 10 }));
-
-gc(); // Clean up new connections
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -10}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 10}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 10}));
+
+ gc(); // Clean up new connections
-jsTest.log("Stopping second shard...");
+ jsTest.log("Stopping second shard...");
-mongosConnIdle = new Mongo( mongos.host );
+ mongosConnIdle = new Mongo(mongos.host);
-st.rs1.stop(rs1Secondary);
-
-jsTest.log("Testing active connection with second shard down...");
+ st.rs1.stop(rs1Secondary);
+
+ jsTest.log("Testing active connection with second shard down...");
-mongosConnActive.setSlaveOk();
-assert.neq(null, mongosConnActive.getCollection( collSharded.toString() ).findOne({ _id : -1 }));
-assert.neq(null, mongosConnActive.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }));
+ mongosConnActive.setSlaveOk();
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : -11 }));
-assert.writeError(mongosConnActive.getCollection( collSharded.toString() ).insert({ _id : 11 }));
-assert.writeError(mongosConnActive.getCollection( collUnsharded.toString() ).insert({ _id : 11 }));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -11}));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 11}));
+ assert.writeError(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 11}));
-jsTest.log("Testing idle connection with second shard down...");
+ jsTest.log("Testing idle connection with second shard down...");
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : -12 }));
-assert.writeError(mongosConnIdle.getCollection( collSharded.toString() ).insert({ _id : 12 }));
-assert.writeError(mongosConnIdle.getCollection( collUnsharded.toString() ).insert({ _id : 12 }));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -12}));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 12}));
+ assert.writeError(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 12}));
-mongosConnIdle.setSlaveOk();
-assert.neq(null, mongosConnIdle.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-assert.neq(null, mongosConnIdle.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ mongosConnIdle.setSlaveOk();
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections with second shard down...");
+ jsTest.log("Testing new connections with second shard down...");
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collSharded.toString() ).findOne({ _id : -1 }) );
-mongosConnNew = new Mongo( mongos.host );
-mongosConnNew.setSlaveOk();
-assert.neq(null, mongosConnNew.getCollection( collUnsharded.toString() ).findOne({ _id : 1 }) );
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(mongos.host);
+ mongosConnNew.setSlaveOk();
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : -13 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collSharded.toString() ).insert({ _id : 13 }));
-mongosConnNew = new Mongo( mongos.host );
-assert.writeError(mongosConnNew.getCollection( collUnsharded.toString() ).insert({ _id : 13 }));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -13}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 13}));
+ mongosConnNew = new Mongo(mongos.host);
+ assert.writeError(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 13}));
-gc(); // Clean up new connections
+ gc(); // Clean up new connections
-st.stop();
+ st.stop();
})();
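The non-auth replica-set variant above additionally walks the full read-preference matrix ("primary", "secondary", "primaryPreferred", "secondaryPreferred", "nearest") over active, idle, and new connections. The behaviour its "Ensure read prefs override slaveOK" blocks pin down is that an explicit read preference takes precedence over slaveOk: with the second shard's primary stopped, a "primary" read pref must fail against that shard even though slaveOk alone would allow the read from a secondary. A condensed sketch of that check, assuming the same `st` and `collSharded` setup as in the test:

    var conn = new Mongo(st.s0.host);
    conn.setSlaveOk();             // slaveOk alone would let the read hit a secondary
    conn.setReadPref("primary");   // ...but an explicit "primary" read pref wins

    // The chunk holding {_id: 1} is on the shard whose primary is down, so the read throws.
    assert.throws(function() {
        conn.getCollection(collSharded.toString()).findOne({_id: 1});
    });

    // The chunk holding {_id: -1} is on a healthy shard, so the same connection still reads it.
    assert.neq(null, conn.getCollection(collSharded.toString()).findOne({_id: -1}));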
diff --git a/jstests/sharding/mongos_shard_failure_tolerance.js b/jstests/sharding/mongos_shard_failure_tolerance.js
index 9b26bbd7eb8..73455666635 100644
--- a/jstests/sharding/mongos_shard_failure_tolerance.js
+++ b/jstests/sharding/mongos_shard_failure_tolerance.js
@@ -11,131 +11,130 @@
// (connection connected after shard change).
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 3, mongos: 1 });
+ var st = new ShardingTest({shards: 3, mongos: 1});
-var admin = st.s0.getDB("admin");
+ var admin = st.s0.getDB("admin");
-var collSharded = st.s0.getCollection("fooSharded.barSharded");
-var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
+ var collSharded = st.s0.getCollection("fooSharded.barSharded");
+ var collUnsharded = st.s0.getCollection("fooUnsharded.barUnsharded");
-assert.commandWorked(admin.runCommand({ enableSharding: collSharded.getDB().toString() }));
-st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
+ assert.commandWorked(admin.runCommand({enableSharding: collSharded.getDB().toString()}));
+ st.ensurePrimaryShard(collSharded.getDB().toString(), st.shard0.shardName);
-assert.commandWorked(admin.runCommand({ shardCollection: collSharded.toString(),
- key: { _id: 1 } }));
-assert.commandWorked(admin.runCommand({ split: collSharded.toString(), middle: { _id: 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk: collSharded.toString(),
- find: { _id: 0 },
- to: st.shard1.shardName }));
+ assert.commandWorked(
+ admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+ assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: collSharded.toString(), find: {_id: 0}, to: st.shard1.shardName}));
-// Create the unsharded database
-assert.writeOK(collUnsharded.insert({ some: "doc" }));
-assert.writeOK(collUnsharded.remove({}));
-assert.commandWorked(
- admin.runCommand({ movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName }));
+ // Create the unsharded database
+ assert.writeOK(collUnsharded.insert({some: "doc"}));
+ assert.writeOK(collUnsharded.remove({}));
+ assert.commandWorked(admin.runCommand(
+ {movePrimary: collUnsharded.getDB().toString(), to: st.shard0.shardName}));
-//
-// Setup is complete
-//
+ //
+ // Setup is complete
+ //
-jsTest.log("Inserting initial data...");
+ jsTest.log("Inserting initial data...");
-var mongosConnActive = new Mongo(st.s0.host);
-var mongosConnIdle = null;
-var mongosConnNew = null;
+ var mongosConnActive = new Mongo(st.s0.host);
+ var mongosConnIdle = null;
+ var mongosConnNew = null;
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: -1 }));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 1 }));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({ _id: 1 }));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -1}));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 1}));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 1}));
-jsTest.log("Stopping third shard...");
+ jsTest.log("Stopping third shard...");
-mongosConnIdle = new Mongo(st.s0.host);
+ mongosConnIdle = new Mongo(st.s0.host);
-MongoRunner.stopMongod(st.shard2);
+ MongoRunner.stopMongod(st.shard2);
-jsTest.log("Testing active connection...");
+ jsTest.log("Testing active connection...");
-assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({ _id: 1 }));
-assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: -2 }));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 2 }));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({ _id: 2 }));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -2}));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 2}));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 2}));
-jsTest.log("Testing idle connection...");
+ jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: -3 }));
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: 3 }));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({ _id: 3 }));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -3}));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 3}));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 3}));
-assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({ _id: 1 }));
-assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: 1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections...");
+ jsTest.log("Testing new connections...");
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({ _id: 1 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: 1}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: -4 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: 4 }));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({ _id: 4 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -4}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 4}));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 4}));
-gc(); // Clean up new connections
+ gc(); // Clean up new connections
-jsTest.log("Stopping second shard...");
+ jsTest.log("Stopping second shard...");
-mongosConnIdle = new Mongo(st.s0.host);
+ mongosConnIdle = new Mongo(st.s0.host);
-MongoRunner.stopMongod(st.shard1);
+ MongoRunner.stopMongod(st.shard1);
-jsTest.log("Testing active connection...");
+ jsTest.log("Testing active connection...");
-assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnActive.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnActive.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: -5 }));
+ assert.writeOK(mongosConnActive.getCollection(collSharded.toString()).insert({_id: -5}));
-assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({ _id: 5 }));
-assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({ _id: 5 }));
+ assert.writeError(mongosConnActive.getCollection(collSharded.toString()).insert({_id: 5}));
+ assert.writeOK(mongosConnActive.getCollection(collUnsharded.toString()).insert({_id: 5}));
-jsTest.log("Testing idle connection...");
+ jsTest.log("Testing idle connection...");
-assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: -6 }));
-assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({ _id: 6 }));
-assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({ _id: 6 }));
+ assert.writeOK(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: -6}));
+ assert.writeError(mongosConnIdle.getCollection(collSharded.toString()).insert({_id: 6}));
+ assert.writeOK(mongosConnIdle.getCollection(collUnsharded.toString()).insert({_id: 6}));
-assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({ _id: -1 }));
-assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ assert.neq(null, mongosConnIdle.getCollection(collSharded.toString()).findOne({_id: -1}));
+ assert.neq(null, mongosConnIdle.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-jsTest.log("Testing new connections...");
+ jsTest.log("Testing new connections...");
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({ _id: -1 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collSharded.toString()).findOne({_id: -1}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({ _id: 1 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.neq(null, mongosConnNew.getCollection(collUnsharded.toString()).findOne({_id: 1}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: -7 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collSharded.toString()).insert({_id: -7}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({ _id: 7 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeError(mongosConnNew.getCollection(collSharded.toString()).insert({_id: 7}));
-mongosConnNew = new Mongo(st.s0.host);
-assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({ _id: 7 }));
+ mongosConnNew = new Mongo(st.s0.host);
+ assert.writeOK(mongosConnNew.getCollection(collUnsharded.toString()).insert({_id: 7}));
-st.stop();
+ st.stop();
})();
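
The hunk above exercises three mongos connection states, one already active, one opened but left idle, and brand-new connections, while shards are stopped one at a time, and expects operations that target the surviving shards to keep working. A minimal sketch of that idiom follows, assuming a running ShardingTest named st and an already-sharded collection coll reachable through st.s0 (illustrative names, not part of the patch):

    // Sketch of the active/idle/new-connection pattern; `st` and `coll` are assumed
    // to exist as described above.
    var activeConn = new Mongo(st.s0.host);  // used both before and after the failure
    assert.neq(null, activeConn.getCollection(coll.toString()).findOne({_id: -1}));

    var idleConn = new Mongo(st.s0.host);    // opened before the failure, first used after it

    MongoRunner.stopMongod(st.shard2);       // take down a shard that owns none of the tested chunks

    // Reads routed to the surviving shards keep working on every connection type.
    assert.neq(null, activeConn.getCollection(coll.toString()).findOne({_id: -1}));
    assert.neq(null, idleConn.getCollection(coll.toString()).findOne({_id: -1}));

    var newConn = new Mongo(st.s0.host);     // created only after the failure
    assert.neq(null, newConn.getCollection(coll.toString()).findOne({_id: -1}));
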
diff --git a/jstests/sharding/mongos_validate_backoff.js b/jstests/sharding/mongos_validate_backoff.js
index ef057c04ef2..f78dae0677e 100644
--- a/jstests/sharding/mongos_validate_backoff.js
+++ b/jstests/sharding/mongos_validate_backoff.js
@@ -1,60 +1,60 @@
-// Ensures that single mongos shard-key errors are fast, but slow down when many are triggered
+// Ensures that single mongos shard-key errors are fast, but slow down when many are triggered
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 1, mongos : 1 });
+ var st = new ShardingTest({shards: 1, mongos: 1});
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var coll = mongos.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
-coll.ensureIndex({ shardKey : 1 });
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { shardKey : 1 } }));
+ coll.ensureIndex({shardKey: 1});
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {shardKey: 1}}));
-var timeBadInsert = function() {
- var start = new Date().getTime();
+ var timeBadInsert = function() {
+ var start = new Date().getTime();
- // Bad insert, no shard key
- assert.writeError(coll.insert({ hello : "world" }));
+ // Bad insert, no shard key
+ assert.writeError(coll.insert({hello: "world"}));
- var end = new Date().getTime();
+ var end = new Date().getTime();
- return end - start;
-};
+ return end - start;
+ };
-// We need to work at least twice in order to check resetting the counter
-var successNeeded = 2;
-var success = 0;
+    // We need to succeed at least twice in order to check that the backoff counter resets
+ var successNeeded = 2;
+ var success = 0;
-// Loop over this test a few times, to ensure that the error counters get reset if we don't have
-// bad inserts over a long enough time.
-for (var test = 0; test < 5; test++) {
- var firstWait = timeBadInsert();
- var lastWait = 0;
+ // Loop over this test a few times, to ensure that the error counters get reset if we don't have
+ // bad inserts over a long enough time.
+ for (var test = 0; test < 5; test++) {
+ var firstWait = timeBadInsert();
+ var lastWait = 0;
- for(var i = 0; i < 20; i++) {
- printjson(lastWait = timeBadInsert());
- }
-
- // As a heuristic test, we want to make sure that the error wait after sleeping is much less
- // than the error wait after a lot of errors.
- if (lastWait > firstWait * 2 * 2) {
- success++;
- }
-
- if (success >= successNeeded) {
- break;
- }
+ for (var i = 0; i < 20; i++) {
+ printjson(lastWait = timeBadInsert());
+ }
- // Abort if we've failed too many times
- assert.lt(test, 4);
+ // As a heuristic test, we want to make sure that the error wait after sleeping is much less
+ // than the error wait after a lot of errors.
+ if (lastWait > firstWait * 2 * 2) {
+ success++;
+ }
- // Sleeping for long enough to reset our exponential counter
- sleep(3000);
-}
+ if (success >= successNeeded) {
+ break;
+ }
+
+ // Abort if we've failed too many times
+ assert.lt(test, 4);
+
+ // Sleeping for long enough to reset our exponential counter
+ sleep(3000);
+ }
-st.stop();
+ st.stop();
})();
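
The backoff test works by timing how long mongos takes to reject an insert that omits the shard key: the first rejection should return quickly, while the twentieth in a row should be noticeably slower, and sleeping a few seconds should reset the counter. A compact sketch of the timing helper, assuming coll is a collection sharded on {shardKey: 1}:

    // Times a single rejected insert; the document deliberately omits the shard key,
    // so mongos refuses it and the elapsed time reflects any error backoff.
    function timeBadInsert(coll) {
        var start = new Date().getTime();
        assert.writeError(coll.insert({hello: "world"}));
        return new Date().getTime() - start;
    }

The test treats a run as successful when the last of twenty consecutive waits exceeds four times the first one, retries up to five runs, and sleeps three seconds between runs so the exponential counter can reset.
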
diff --git a/jstests/sharding/mongos_validate_writes.js b/jstests/sharding/mongos_validate_writes.js
index b0843cd5cdb..34d40b61172 100644
--- a/jstests/sharding/mongos_validate_writes.js
+++ b/jstests/sharding/mongos_validate_writes.js
@@ -4,7 +4,7 @@
// Note that this is *unsafe* with broadcast removes and updates
//
-var st = new ShardingTest({ shards : 2, mongos : 3, other : { shardOptions : { verbose : 2 } } });
+var st = new ShardingTest({shards: 2, mongos: 3, other: {shardOptions: {verbose: 2}}});
st.stopBalancer();
var mongos = st.s0;
@@ -12,22 +12,22 @@ var staleMongosA = st.s1;
var staleMongosB = st.s2;
// Additional logging
-printjson( mongos.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( staleMongosA.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( staleMongosB.getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( st._connections[0].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-printjson( st._connections[1].getDB( "admin" ).runCommand({ setParameter : 1, logLevel : 2 }) );
-
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var coll = mongos.getCollection( "foo.bar" );
-var staleCollA = staleMongosA.getCollection( coll + "" );
-var staleCollB = staleMongosB.getCollection( coll + "" );
-
-printjson( admin.runCommand({ enableSharding : coll.getDB() + "" }) );
+printjson(mongos.getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(staleMongosA.getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(staleMongosB.getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(st._connections[0].getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+printjson(st._connections[1].getDB("admin").runCommand({setParameter: 1, logLevel: 2}));
+
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var coll = mongos.getCollection("foo.bar");
+var staleCollA = staleMongosA.getCollection(coll + "");
+var staleCollB = staleMongosB.getCollection(coll + "");
+
+printjson(admin.runCommand({enableSharding: coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-coll.ensureIndex({ a : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { a : 1 } }) );
+coll.ensureIndex({a: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {a: 1}}));
// Let the stale mongos see the collection state
staleCollA.findOne();
@@ -35,57 +35,57 @@ staleCollB.findOne();
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ b : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { b : 1 } }) );
+coll.ensureIndex({b: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {b: 1}}));
// Make sure that we can successfully insert, even though we have stale state
-assert.writeOK(staleCollA.insert({ b : "b" }));
+assert.writeOK(staleCollA.insert({b: "b"}));
// Make sure we unsuccessfully insert with old info
-assert.writeError(staleCollB.insert({ a : "a" }));
+assert.writeError(staleCollB.insert({a: "a"}));
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ c : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { c : 1 } }) );
+coll.ensureIndex({c: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {c: 1}}));
// Make sure we can successfully upsert, even though we have stale state
-assert.writeOK(staleCollA.update({ c : "c" }, { c : "c" }, true ));
+assert.writeOK(staleCollA.update({c: "c"}, {c: "c"}, true));
// Make sure we unsuccessfully upsert with old info
-assert.writeError(staleCollB.update({ b : "b" }, { b : "b" }, true ));
+assert.writeError(staleCollB.update({b: "b"}, {b: "b"}, true));
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ d : 1 });
-printjson( admin.runCommand({ shardCollection : coll + "", key : { d : 1 } }) );
+coll.ensureIndex({d: 1});
+printjson(admin.runCommand({shardCollection: coll + "", key: {d: 1}}));
// Make sure we can successfully update, even though we have stale state
-assert.writeOK(coll.insert({ d : "d" }));
+assert.writeOK(coll.insert({d: "d"}));
-assert.writeOK(staleCollA.update({ d : "d" }, { $set : { x : "x" } }, false, false ));
-assert.eq( staleCollA.findOne().x, "x" );
+assert.writeOK(staleCollA.update({d: "d"}, {$set: {x: "x"}}, false, false));
+assert.eq(staleCollA.findOne().x, "x");
// Make sure we unsuccessfully update with old info
-assert.writeError(staleCollB.update({ c : "c" }, { $set : { x : "y" } }, false, false ));
-assert.eq( staleCollB.findOne().x, "x" );
+assert.writeError(staleCollB.update({c: "c"}, {$set: {x: "y"}}, false, false));
+assert.eq(staleCollB.findOne().x, "x");
// Change the collection sharding state
coll.drop();
-coll.ensureIndex({ e : 1 });
+coll.ensureIndex({e: 1});
// Deletes need to be across two shards to trigger an error - this is probably an exceptional case
-printjson( admin.runCommand({ movePrimary : coll.getDB() + "", to : "shard0000" }) );
-printjson( admin.runCommand({ shardCollection : coll + "", key : { e : 1 } }) );
-printjson( admin.runCommand({ split : coll + "", middle : { e : 0 } }) );
-printjson( admin.runCommand({ moveChunk : coll + "", find : { e : 0 }, to : "shard0001" }) );
+printjson(admin.runCommand({movePrimary: coll.getDB() + "", to: "shard0000"}));
+printjson(admin.runCommand({shardCollection: coll + "", key: {e: 1}}));
+printjson(admin.runCommand({split: coll + "", middle: {e: 0}}));
+printjson(admin.runCommand({moveChunk: coll + "", find: {e: 0}, to: "shard0001"}));
// Make sure we can successfully remove, even though we have stale state
-assert.writeOK(coll.insert({ e : "e" }));
+assert.writeOK(coll.insert({e: "e"}));
-assert.writeOK(staleCollA.remove({ e : "e" }, true));
-assert.eq( null, staleCollA.findOne() );
+assert.writeOK(staleCollA.remove({e: "e"}, true));
+assert.eq(null, staleCollA.findOne());
// Make sure we unsuccessfully remove with old info
-assert.writeError(staleCollB.remove({ d : "d" }, true ));
+assert.writeError(staleCollB.remove({d: "d"}, true));
st.stop();
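
Each block of the test above re-shards foo.bar on a new key and then writes through routers that still hold the previous metadata: the router given a document with the new shard key refreshes and succeeds, while the router given only the old key gets a write error. A minimal sketch of one such round, assuming admin, coll, staleCollA and staleCollB are set up as in the test:

    // Re-shard on a new key, then probe the two stale routers.
    coll.drop();
    coll.ensureIndex({b: 1});
    printjson(admin.runCommand({shardCollection: coll + "", key: {b: 1}}));

    assert.writeOK(staleCollA.insert({b: "b"}));     // stale router refreshes and succeeds
    assert.writeError(staleCollB.insert({a: "a"}));  // only the old shard key: rejected
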
diff --git a/jstests/sharding/movePrimary1.js b/jstests/sharding/movePrimary1.js
index cd0478b1a1e..25217879e6d 100644
--- a/jstests/sharding/movePrimary1.js
+++ b/jstests/sharding/movePrimary1.js
@@ -1,52 +1,54 @@
(function() {
-var s = new ShardingTest({ name: "movePrimary1", shards: 2 });
-
-initDB = function( name ){
- var db = s.getDB( name );
- var c = db.foo;
- c.save( { a : 1 } );
- c.save( { a : 2 } );
- c.save( { a : 3 } );
- assert.eq( 3 , c.count() );
-
- return s.getPrimaryShard( name );
-};
-
-from = initDB( "test1" );
-to = s.getOther( from );
-
-assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data before move" );
-assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data before move" );
-
-assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ) ,
- s.normalize( from.name ) , "not in db correctly to start" );
-s.printShardingStatus();
-oldShardName = s.config.databases.findOne( {_id: "test1"} ).primary;
-s.admin.runCommand( { moveprimary : "test1" , to : to.name } );
-s.printShardingStatus();
-assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
- s.normalize( to.name ) , "to in config db didn't change after first move" );
-
-assert.eq( 0 , from.getDB( "test1" ).foo.count() , "from still has data after move" );
-assert.eq( 3 , to.getDB( "test1" ).foo.count() , "to doesn't have data after move" );
-
-// move back, now using shard name instead of server address
-s.admin.runCommand( { moveprimary : "test1" , to : oldShardName } );
-s.printShardingStatus();
-assert.eq( s.normalize( s.config.databases.findOne( { _id : "test1" } ).primary ),
- oldShardName , "to in config db didn't change after second move" );
-
-assert.eq( 3 , from.getDB( "test1" ).foo.count() , "from doesn't have data after move back" );
-assert.eq( 0 , to.getDB( "test1" ).foo.count() , "to has data after move back" );
-
-// attempting to move primary DB to non-existent shard should error out with appropriate code
-var res = s.admin.runCommand({ movePrimary: 'test1', to: 'dontexist' });
-assert.commandFailed(res,
- 'attempting to use non-existent shard as primary should error out');
-// ErrorCodes::ShardNotFound === 70
-assert.eq(res.code, 70, 'ShardNotFound code not used');
-
-s.stop();
+ var s = new ShardingTest({name: "movePrimary1", shards: 2});
+
+ initDB = function(name) {
+ var db = s.getDB(name);
+ var c = db.foo;
+ c.save({a: 1});
+ c.save({a: 2});
+ c.save({a: 3});
+ assert.eq(3, c.count());
+
+ return s.getPrimaryShard(name);
+ };
+
+ from = initDB("test1");
+ to = s.getOther(from);
+
+ assert.eq(3, from.getDB("test1").foo.count(), "from doesn't have data before move");
+ assert.eq(0, to.getDB("test1").foo.count(), "to has data before move");
+
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(from.name),
+ "not in db correctly to start");
+ s.printShardingStatus();
+ oldShardName = s.config.databases.findOne({_id: "test1"}).primary;
+ s.admin.runCommand({moveprimary: "test1", to: to.name});
+ s.printShardingStatus();
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ s.normalize(to.name),
+ "to in config db didn't change after first move");
+
+ assert.eq(0, from.getDB("test1").foo.count(), "from still has data after move");
+ assert.eq(3, to.getDB("test1").foo.count(), "to doesn't have data after move");
+
+ // move back, now using shard name instead of server address
+ s.admin.runCommand({moveprimary: "test1", to: oldShardName});
+ s.printShardingStatus();
+ assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
+ oldShardName,
+ "to in config db didn't change after second move");
+
+ assert.eq(3, from.getDB("test1").foo.count(), "from doesn't have data after move back");
+ assert.eq(0, to.getDB("test1").foo.count(), "to has data after move back");
+
+ // attempting to move primary DB to non-existent shard should error out with appropriate code
+ var res = s.admin.runCommand({movePrimary: 'test1', to: 'dontexist'});
+ assert.commandFailed(res, 'attempting to use non-existent shard as primary should error out');
+ // ErrorCodes::ShardNotFound === 70
+ assert.eq(res.code, 70, 'ShardNotFound code not used');
+
+ s.stop();
})();
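
movePrimary relocates a database's unsharded data to another shard and rewrites its entry in config.databases; the test checks that the document counts flip between the two shards and that a nonexistent target fails with ErrorCodes.ShardNotFound (70). A condensed sketch of the success path, assuming s is the ShardingTest and test1.foo holds the three documents inserted by initDB:

    // Move the primary and confirm that both the config metadata and the data moved.
    var from = s.getPrimaryShard("test1");
    var to = s.getOther(from);

    s.admin.runCommand({moveprimary: "test1", to: to.name});
    assert.eq(s.normalize(s.config.databases.findOne({_id: "test1"}).primary),
              s.normalize(to.name));
    assert.eq(0, from.getDB("test1").foo.count());  // data left the old primary
    assert.eq(3, to.getDB("test1").foo.count());    // and arrived on the new one
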
diff --git a/jstests/sharding/move_chunk_basic.js b/jstests/sharding/move_chunk_basic.js
index 35703b6baa6..354a222da10 100644
--- a/jstests/sharding/move_chunk_basic.js
+++ b/jstests/sharding/move_chunk_basic.js
@@ -3,94 +3,95 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-var kDbName = 'db';
+ var kDbName = 'db';
-var shards = mongos.getCollection('config.shards').find().toArray();
+ var shards = mongos.getCollection('config.shards').find().toArray();
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
-function testHashed() {
- var ns = kDbName + '.fooHashed';
+ function testHashed() {
+ var ns = kDbName + '.fooHashed';
- // Errors if either bounds is not a valid shard key
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
+ // Errors if either bounds is not a valid shard key
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {_id: 'hashed'}}));
- var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
- assert(aChunk);
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max-1],
- to: shard1}));
+ var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
+ assert(aChunk);
+ assert.commandFailed(mongos.adminCommand(
+ {moveChunk: ns, bounds: [aChunk.min, aChunk.max - 1], to: shard1}));
- // Fail if find and bounds are both set.
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {_id: 1},
- bounds: [aChunk.min, aChunk.max], to: shard1}));
+ // Fail if find and bounds are both set.
+ assert.commandFailed(mongos.adminCommand(
+ {moveChunk: ns, find: {_id: 1}, bounds: [aChunk.min, aChunk.max], to: shard1}));
- // Using find on collections with hash shard keys should not crash
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: shard1}));
+ // Using find on collections with hash shard keys should not crash
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {_id: 1}, to: shard1}));
- // Fail if chunk is already at shard
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max],
- to: shard0}));
+ // Fail if chunk is already at shard
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard0}));
- assert.commandWorked(mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max],
- to: shard1}));
- assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
- assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
+ assert.eq(0, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard0}));
+ assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
- mongos.getDB(kDbName).fooHashed.drop();
-}
+ mongos.getDB(kDbName).fooHashed.drop();
+ }
-function testNotHashed(keyDoc) {
- var ns = kDbName + '.foo';
+ function testNotHashed(keyDoc) {
+ var ns = kDbName + '.foo';
- // Fail if find is not a valid shard key.
- assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
+ // Fail if find is not a valid shard key.
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
+ var chunkId = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0})._id;
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
- assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: {xxx: 1}, to: shard1}));
+ assert.eq(shard0, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ assert.commandWorked(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- // Fail if to shard does not exists
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
+        // Fail if the destination ('to') shard does not exist
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: 'WrongShard'}));
- // Fail if chunk is already at shard
- assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
- assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
+ // Fail if chunk is already at shard
+ assert.commandFailed(mongos.adminCommand({moveChunk: ns, find: keyDoc, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').chunks.findOne({_id: chunkId}).shard);
- mongos.getDB(kDbName).foo.drop();
-}
+ mongos.getDB(kDbName).foo.drop();
+ }
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-st.ensurePrimaryShard(kDbName, shard0);
+ st.ensurePrimaryShard(kDbName, shard0);
-// Fail if invalid namespace.
-var res = assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
-assert.eq( res.info);
+ // Fail if invalid namespace.
+ var res =
+ assert.commandFailed(mongos.adminCommand({moveChunk: '', find: {_id: 1}, to: shard1}));
+ assert.eq(res.info);
-// Fail if database does not exist.
-assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
+ // Fail if database does not exist.
+ assert.commandFailed(mongos.adminCommand({moveChunk: 'a.b', find: {_id: 1}, to: shard1}));
-// Fail if collection is unsharded.
-assert.commandFailed(mongos.adminCommand({moveChunk: kDbName + '.xxx',
- find: {_id: 1}, to: shard1}));
+ // Fail if collection is unsharded.
+ assert.commandFailed(
+ mongos.adminCommand({moveChunk: kDbName + '.xxx', find: {_id: 1}, to: shard1}));
-testHashed();
+ testHashed();
-testNotHashed({a:1});
+ testNotHashed({a: 1});
-testNotHashed({a:1, b:1});
+ testNotHashed({a: 1, b: 1});
-st.stop();
+ st.stop();
})();
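
For a hashed shard key a chunk has to be addressed by its exact [min, max] bounds (a find document is rejected), while a range-sharded collection can use find with a shard-key document; either way, config.chunks records which shard owns the chunk afterwards. A short sketch of the bounds-based move, assuming mongos, ns, shard0 and shard1 as in the test:

    // Look up a chunk currently on shard0 and move it by its exact bounds.
    var aChunk = mongos.getDB('config').chunks.findOne({_id: RegExp(ns), shard: shard0});
    assert(aChunk);
    assert.commandWorked(
        mongos.adminCommand({moveChunk: ns, bounds: [aChunk.min, aChunk.max], to: shard1}));
    assert.eq(1, mongos.getDB('config').chunks.count({_id: aChunk._id, shard: shard1}));
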
diff --git a/jstests/sharding/move_chunk_missing_idx.js b/jstests/sharding/move_chunk_missing_idx.js
index 6171bb539a7..ae3da051e29 100644
--- a/jstests/sharding/move_chunk_missing_idx.js
+++ b/jstests/sharding/move_chunk_missing_idx.js
@@ -3,44 +3,39 @@
* have the index and is not empty.
*/
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ movePrimary: 'test', to: 'shard0001' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+testDB.adminCommand({enableSharding: 'test'});
+testDB.adminCommand({movePrimary: 'test', to: 'shard0001'});
+testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
// Test procedure:
// 1. Create index (index should now be in primary shard).
// 2. Split chunk into 3 parts.
// 3. Move 1 chunk to 2nd shard - should have no issues
-testDB.user.ensureIndex({ a: 1, b: 1 });
+testDB.user.ensureIndex({a: 1, b: 1});
-testDB.adminCommand({ split: 'test.user', middle: { x: 0 }});
-testDB.adminCommand({ split: 'test.user', middle: { x: 10 }});
+testDB.adminCommand({split: 'test.user', middle: {x: 0}});
+testDB.adminCommand({split: 'test.user', middle: {x: 10}});
// Case: the collection does not exist on the destination shard, so it has no chunks and no index there.
-assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000' }));
+assert.commandWorked(testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
// Drop index since last moveChunk created this.
-st.d0.getDB('test').user.dropIndex({ a: 1, b: 1 });
+st.d0.getDB('test').user.dropIndex({a: 1, b: 1});
// Collection exist but empty, index missing at destination case.
-assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 10 },
- to: 'shard0000' }));
+assert.commandWorked(testDB.adminCommand({moveChunk: 'test.user', find: {x: 10}, to: 'shard0000'}));
// Drop index since last moveChunk created this.
-st.d0.getDB('test').user.dropIndex({ a: 1, b: 1 });
+st.d0.getDB('test').user.dropIndex({a: 1, b: 1});
// Collection not empty, index missing at destination case.
-testDB.user.insert({ x: 10 });
-assert.commandFailed(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: -10 },
- to: 'shard0000' }));
+testDB.user.insert({x: 10});
+assert.commandFailed(
+ testDB.adminCommand({moveChunk: 'test.user', find: {x: -10}, to: 'shard0000'}));
st.stop();
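
The rule exercised above is that a migration may build a missing index on the destination shard only while the destination collection is absent or empty; once it holds documents, the migration is refused. The three cases, restated with the reasoning spelled out (same names as the test):

    // 1. Destination has no collection at all: the move succeeds and builds {a: 1, b: 1} there.
    assert.commandWorked(
        testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));

    // 2. Destination collection exists but is still empty: the move succeeds again.
    st.d0.getDB('test').user.dropIndex({a: 1, b: 1});
    assert.commandWorked(
        testDB.adminCommand({moveChunk: 'test.user', find: {x: 10}, to: 'shard0000'}));

    // 3. The destination collection now receives a document and lacks the index: refused.
    st.d0.getDB('test').user.dropIndex({a: 1, b: 1});
    testDB.user.insert({x: 10});  // lands in the chunk that already lives on shard0000
    assert.commandFailed(
        testDB.adminCommand({moveChunk: 'test.user', find: {x: -10}, to: 'shard0000'}));
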
diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js
index 5e4a9f06a62..288d4fb03e5 100644
--- a/jstests/sharding/move_primary_basic.js
+++ b/jstests/sharding/move_primary_basic.js
@@ -3,58 +3,60 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
+ var st = new ShardingTest({mongos: 1, shards: 2});
-var mongos = st.s0;
+ var mongos = st.s0;
-var kDbName = 'db';
+ var kDbName = 'db';
-var shards = mongos.getCollection('config.shards').find().toArray();
+ var shards = mongos.getCollection('config.shards').find().toArray();
-var shard0 = shards[0]._id;
-var shard1 = shards[1]._id;
+ var shard0 = shards[0]._id;
+ var shard1 = shards[1]._id;
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
-st.ensurePrimaryShard(kDbName, shard0);
-assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ st.ensurePrimaryShard(kDbName, shard0);
+ assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-// Can run only on mongos.
-assert.commandFailedWithCode(st.d0.getDB('admin').runCommand({movePrimary : kDbName, to: shard0}),
- ErrorCodes.CommandNotFound);
+ // Can run only on mongos.
+ assert.commandFailedWithCode(
+ st.d0.getDB('admin').runCommand({movePrimary: kDbName, to: shard0}),
+ ErrorCodes.CommandNotFound);
-// Can run only against the admin database.
-assert.commandFailedWithCode(mongos.getDB('test').runCommand({movePrimary : kDbName, to: shard0}),
- ErrorCodes.Unauthorized);
+ // Can run only against the admin database.
+ assert.commandFailedWithCode(
+ mongos.getDB('test').runCommand({movePrimary: kDbName, to: shard0}),
+ ErrorCodes.Unauthorized);
-// Can't movePrimary for 'config' database.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'config', to: shard0}));
+ // Can't movePrimary for 'config' database.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'config', to: shard0}));
-// Can't movePrimary for 'local' database.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'local', to: shard0}));
+ // Can't movePrimary for 'local' database.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'local', to: shard0}));
-// Can't movePrimary for 'admin' database.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'admin', to: shard0}));
+ // Can't movePrimary for 'admin' database.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'admin', to: shard0}));
-// Can't movePrimary for invalid db name.
-assert.commandFailed(mongos.adminCommand({movePrimary : 'a.b', to: shard0}));
-assert.commandFailed(mongos.adminCommand({movePrimary : '', to: shard0}));
+ // Can't movePrimary for invalid db name.
+ assert.commandFailed(mongos.adminCommand({movePrimary: 'a.b', to: shard0}));
+ assert.commandFailed(mongos.adminCommand({movePrimary: '', to: shard0}));
-// Fail if shard does not exist or empty.
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName, to: 'Unknown'}));
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName, to: ''}));
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName}));
+    // Fail if the destination shard does not exist, is empty, or is omitted.
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: 'Unknown'}));
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: ''}));
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName}));
-// Fail if moveShard to already primary and verify metadata changes.
-assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+    // Fail if movePrimary targets the shard that is already primary, and verify the metadata changes.
+ assert.eq(shard0, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-assert.commandWorked(mongos.adminCommand({movePrimary : kDbName, to: shard1}));
-assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-assert.commandFailed(mongos.adminCommand({movePrimary : kDbName, to: shard1}));
-assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
+ assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
+ assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
-st.stop();
+ st.stop();
})();
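
Beyond the argument checks, the core assertions are that movePrimary is refused for the config, local and admin databases and for unknown or missing shards, and that a successful move shows up in config.databases while repeating it against the shard that is already primary fails without changing anything. The success/failure pair, using the same names as the test:

    // Move the primary to shard1, verify the metadata, then confirm that moving it
    // to the shard that is already primary fails and leaves the metadata untouched.
    assert.commandWorked(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
    assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);

    assert.commandFailed(mongos.adminCommand({movePrimary: kDbName, to: shard1}));
    assert.eq(shard1, mongos.getDB('config').databases.findOne({_id: kDbName}).primary);
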
diff --git a/jstests/sharding/move_stale_mongos.js b/jstests/sharding/move_stale_mongos.js
index beaa7d34987..a7d1f647f61 100644
--- a/jstests/sharding/move_stale_mongos.js
+++ b/jstests/sharding/move_stale_mongos.js
@@ -16,9 +16,8 @@ var curShardIndex = 0;
for (var i = 0; i < 100; i += 10) {
assert.commandWorked(st.s0.getDB('admin').runCommand({split: testNs, middle: {_id: i}}));
var nextShardIndex = (curShardIndex + 1) % shards.length;
- assert.commandWorked(st.s1.getDB('admin').runCommand({moveChunk: testNs,
- find: {_id: i + 5},
- to: shards[nextShardIndex]}));
+ assert.commandWorked(st.s1.getDB('admin').runCommand(
+ {moveChunk: testNs, find: {_id: i + 5}, to: shards[nextShardIndex]}));
curShardIndex = nextShardIndex;
}
diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js
index e8821be922b..7c20f0d675f 100644
--- a/jstests/sharding/movechunk_include.js
+++ b/jstests/sharding/movechunk_include.js
@@ -1,41 +1,43 @@
function setupMoveChunkTest(st) {
- //Stop Balancer
+ // Stop Balancer
st.stopBalancer();
- var testdb = st.getDB( "test" );
+ var testdb = st.getDB("test");
var testcoll = testdb.foo;
- st.adminCommand( { enablesharding : "test" } );
+ st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
- st.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ st.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
var str = "";
- while ( str.length < 10000 ) {
+ while (str.length < 10000) {
str += "asdasdsdasdasdasdas";
}
var data = 0;
var num = 0;
- //Insert till you get to 10MB of data
+ // Insert till you get to 10MB of data
var bulk = testcoll.initializeUnorderedBulkOp();
- while ( data < ( 1024 * 1024 * 10 ) ) {
- bulk.insert({ _id: num++, s: str });
+ while (data < (1024 * 1024 * 10)) {
+ bulk.insert({_id: num++, s: str});
data += str.length;
}
assert.writeOK(bulk.execute());
- var stats = st.chunkCounts( "foo" );
+ var stats = st.chunkCounts("foo");
var to = "";
- for ( shard in stats ){
- if ( stats[shard] == 0 ) {
+ for (shard in stats) {
+ if (stats[shard] == 0) {
to = shard;
break;
}
}
- var result = st.adminCommand( { movechunk : "test.foo" ,
- find : { _id : 1 } ,
- to : to ,
- _waitForDelete : true} ); //some tests need this...
- assert(result, "movechunk failed: " + tojson( result ) );
+ var result = st.adminCommand({
+ movechunk: "test.foo",
+ find: {_id: 1},
+ to: to,
+ _waitForDelete: true
+ }); // some tests need this...
+ assert(result, "movechunk failed: " + tojson(result));
}
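
setupMoveChunkTest loads roughly 10MB of documents through an unordered bulk op so the collection is large enough to be worth moving, then moves a chunk to whichever shard st.chunkCounts reports as empty, waiting for the delete phase to finish. The bulk-loading idiom, assuming testcoll is the sharded collection and str a filler string of about 10KB:

    // Queue inserts until roughly 10MB of payload is buffered, then execute once.
    var bulk = testcoll.initializeUnorderedBulkOp();
    var data = 0;
    var num = 0;
    while (data < 1024 * 1024 * 10) {
        bulk.insert({_id: num++, s: str});
        data += str.length;
    }
    assert.writeOK(bulk.execute());
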
diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js
index 250816a15b8..a6f4704ec90 100644
--- a/jstests/sharding/movechunk_with_default_paranoia.js
+++ b/jstests/sharding/movechunk_with_default_paranoia.js
@@ -2,14 +2,17 @@
* This test checks that moveParanoia defaults to off (ie the moveChunk directory will not
* be created).
*/
-var st = new ShardingTest( { shards:2, mongos:1 , other : { chunkSize: 1 }});
+var st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}});
load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
-for(i in shards) {
+for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ var hasMoveChunkDir = 0 !=
+ ls(dbpath).filter(function(a) {
+ return null != a.match("moveChunk");
+ }).length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
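
All three moveParanoia variants share the same check: read each shard's dbPath from getCmdLineOpts and look for a moveChunk subdirectory left behind by the migration. A sketch of that check as a helper, assuming conn is a direct connection to a shard's mongod:

    // True when the shard's data directory contains a moveChunk folder, i.e. when the
    // migration kept the paranoia copy of the documents it moved off this shard.
    function shardHasMoveChunkDir(conn) {
        var dbpath = conn.adminCommand("getCmdLineOpts").parsed.storage.dbPath;
        return ls(dbpath).filter(function(f) {
            return null != f.match("moveChunk");
        }).length != 0;
    }

With the default settings and with noMoveParanoia the directory must be absent on every shard; the moveParanoia variant instead records whether any shard ended up with one.
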
diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js
index f643e3aae0a..96348d827bf 100644
--- a/jstests/sharding/movechunk_with_moveParanoia.js
+++ b/jstests/sharding/movechunk_with_moveParanoia.js
@@ -1,20 +1,20 @@
/**
 * This test sets the moveParanoia flag and then checks that the moveChunk directory is created with the moved data
*/
-var st = new ShardingTest( { shards: 2,
- mongos:1,
- other : {
- chunkSize: 1,
- shardOptions: { moveParanoia:"" }}});
+var st = new ShardingTest(
+ {shards: 2, mongos: 1, other: {chunkSize: 1, shardOptions: {moveParanoia: ""}}});
load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
var foundMoveChunk = false;
-for(i in shards) {
+for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ var hasMoveChunkDir = 0 !=
+ ls(dbpath).filter(function(a) {
+ return null != a.match("moveChunk");
+ }).length;
foundMoveChunk = foundMoveChunk || hasMoveChunkDir;
}
diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js
index 0e2f6bc2248..ae8ef5899a8 100644
--- a/jstests/sharding/movechunk_with_noMoveParanoia.js
+++ b/jstests/sharding/movechunk_with_noMoveParanoia.js
@@ -1,19 +1,19 @@
/**
 * This test sets the noMoveParanoia flag and then checks that the moveChunk directory is not created
*/
-var st = new ShardingTest( { shards: 2,
- mongos:1,
- other : {
- chunkSize: 1,
- shardOptions: { noMoveParanoia:"" }}});
+var st = new ShardingTest(
+ {shards: 2, mongos: 1, other: {chunkSize: 1, shardOptions: {noMoveParanoia: ""}}});
load("jstests/sharding/movechunk_include.js");
setupMoveChunkTest(st);
var shards = [st.shard0, st.shard1];
-for(i in shards) {
+for (i in shards) {
var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath;
- var hasMoveChunkDir = 0 != ls(dbpath).filter(function(a) {return null != a.match("moveChunk");}).length;
+ var hasMoveChunkDir = 0 !=
+ ls(dbpath).filter(function(a) {
+ return null != a.match("moveChunk");
+ }).length;
assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath));
}
st.stop();
diff --git a/jstests/sharding/moveprimary_ignore_sharded.js b/jstests/sharding/moveprimary_ignore_sharded.js
index 9b608279c77..f73f50939cc 100644
--- a/jstests/sharding/moveprimary_ignore_sharded.js
+++ b/jstests/sharding/moveprimary_ignore_sharded.js
@@ -1,5 +1,5 @@
// Checks that movePrimary doesn't move collections detected as sharded when it begins moving
-var st = new ShardingTest({ shards : 2, mongos : 2, verbose : 1 });
+var st = new ShardingTest({shards: 2, mongos: 2, verbose: 1});
// Stop balancer, otherwise mongosB may load information about the database non-deterministically
st.stopBalancer();
@@ -7,51 +7,51 @@ st.stopBalancer();
var mongosA = st.s0;
var mongosB = st.s1;
-var adminA = mongosA.getDB( "admin" );
-var adminB = mongosB.getDB( "admin" );
+var adminA = mongosA.getDB("admin");
+var adminB = mongosB.getDB("admin");
-var configA = mongosA.getDB( "config" );
-var configB = mongosB.getDB( "config" );
+var configA = mongosA.getDB("config");
+var configB = mongosB.getDB("config");
// Populate some data
-assert.writeOK(mongosA.getCollection("foo.coll0").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("bar.coll0").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("foo.coll1").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("bar.coll1").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("foo.coll2").insert({ hello : "world" }));
-assert.writeOK(mongosA.getCollection("bar.coll2").insert({ hello : "world" }));
+assert.writeOK(mongosA.getCollection("foo.coll0").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("bar.coll0").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("foo.coll1").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("bar.coll1").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("foo.coll2").insert({hello: "world"}));
+assert.writeOK(mongosA.getCollection("bar.coll2").insert({hello: "world"}));
// Enable sharding
-printjson( adminA.runCommand({ enableSharding : "foo" }) );
+printjson(adminA.runCommand({enableSharding: "foo"}));
st.ensurePrimaryShard('foo', 'shard0001');
-printjson( adminA.runCommand({ enableSharding : "bar" }) );
+printjson(adminA.runCommand({enableSharding: "bar"}));
st.ensurePrimaryShard('bar', 'shard0000');
// Setup three collections per-db
// 0 : not sharded
// 1 : sharded
// 2 : sharded but not seen as sharded by mongosB
-printjson( adminA.runCommand({ shardCollection : "foo.coll1", key : { _id : 1 } }) );
-printjson( adminA.runCommand({ shardCollection : "foo.coll2", key : { _id : 1 } }) );
-printjson( adminA.runCommand({ shardCollection : "bar.coll1", key : { _id : 1 } }) );
-printjson( adminA.runCommand({ shardCollection : "bar.coll2", key : { _id : 1 } }) );
+printjson(adminA.runCommand({shardCollection: "foo.coll1", key: {_id: 1}}));
+printjson(adminA.runCommand({shardCollection: "foo.coll2", key: {_id: 1}}));
+printjson(adminA.runCommand({shardCollection: "bar.coll1", key: {_id: 1}}));
+printjson(adminA.runCommand({shardCollection: "bar.coll2", key: {_id: 1}}));
// All collections are now on primary shard
-var fooPrimaryShard = configA.databases.findOne({ _id : "foo" }).primary;
-var barPrimaryShard = configA.databases.findOne({ _id : "bar" }).primary;
+var fooPrimaryShard = configA.databases.findOne({_id: "foo"}).primary;
+var barPrimaryShard = configA.databases.findOne({_id: "bar"}).primary;
var shards = configA.shards.find().toArray();
-var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1];
-var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
-var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1];
-var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
+var fooPrimaryShard = fooPrimaryShard == shards[0]._id ? shards[0] : shards[1];
+var fooOtherShard = fooPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
+var barPrimaryShard = barPrimaryShard == shards[0]._id ? shards[0] : shards[1];
+var barOtherShard = barPrimaryShard._id == shards[0]._id ? shards[1] : shards[0];
st.printShardingStatus();
-jsTest.log( "Running movePrimary for foo through mongosA ..." );
+jsTest.log("Running movePrimary for foo through mongosA ...");
// MongosA should already know about all the collection states
-printjson( adminA.runCommand({ movePrimary : "foo", to : fooOtherShard._id }) );
+printjson(adminA.runCommand({movePrimary: "foo", to: fooOtherShard._id}));
if (st.configRS) {
// If we are in CSRS mode need to make sure that mongosB will actually get the most recent
@@ -59,31 +59,30 @@ if (st.configRS) {
st.configRS.awaitLastOpCommitted();
}
-
// All collections still correctly sharded / unsharded
-assert.neq( null, mongosA.getCollection("foo.coll0").findOne() );
-assert.neq( null, mongosA.getCollection("foo.coll1").findOne() );
-assert.neq( null, mongosA.getCollection("foo.coll2").findOne() );
+assert.neq(null, mongosA.getCollection("foo.coll0").findOne());
+assert.neq(null, mongosA.getCollection("foo.coll1").findOne());
+assert.neq(null, mongosA.getCollection("foo.coll2").findOne());
-assert.neq( null, mongosB.getCollection("foo.coll0").findOne() );
-assert.neq( null, mongosB.getCollection("foo.coll1").findOne() );
-assert.neq( null, mongosB.getCollection("foo.coll2").findOne() );
+assert.neq(null, mongosB.getCollection("foo.coll0").findOne());
+assert.neq(null, mongosB.getCollection("foo.coll1").findOne());
+assert.neq(null, mongosB.getCollection("foo.coll2").findOne());
-function realCollectionCount( mydb ) {
+function realCollectionCount(mydb) {
var num = 0;
- mydb.getCollectionNames().forEach( function(z) {
- if ( z.startsWith( "coll" ) )
+ mydb.getCollectionNames().forEach(function(z) {
+ if (z.startsWith("coll"))
num++;
- } );
+ });
return num;
}
// All collections sane
-assert.eq( 2, realCollectionCount( new Mongo( fooPrimaryShard.host ).getDB( "foo" ) ) );
-assert.eq( 1, realCollectionCount( new Mongo( fooOtherShard.host ).getDB( "foo" ) ) );
+assert.eq(2, realCollectionCount(new Mongo(fooPrimaryShard.host).getDB("foo")));
+assert.eq(1, realCollectionCount(new Mongo(fooOtherShard.host).getDB("foo")));
-jsTest.log( "Running movePrimary for bar through mongosB ..." );
-printjson( adminB.runCommand({ movePrimary : "bar", to : barOtherShard._id }) );
+jsTest.log("Running movePrimary for bar through mongosB ...");
+printjson(adminB.runCommand({movePrimary: "bar", to: barOtherShard._id}));
// We need to flush the cluster config on mongosA, so it can discover that database 'bar' got
// moved. Otherwise since the collections are not sharded, we have no way of discovering this.
@@ -93,19 +92,19 @@ if (st.configRS) {
// the most recent config data.
st.configRS.awaitLastOpCommitted();
}
-assert.commandWorked(adminA.runCommand({ flushRouterConfig : 1 }));
+assert.commandWorked(adminA.runCommand({flushRouterConfig: 1}));
// All collections still correctly sharded / unsharded
-assert.neq( null, mongosA.getCollection("bar.coll0").findOne() );
-assert.neq( null, mongosA.getCollection("bar.coll1").findOne() );
-assert.neq( null, mongosA.getCollection("bar.coll2").findOne() );
+assert.neq(null, mongosA.getCollection("bar.coll0").findOne());
+assert.neq(null, mongosA.getCollection("bar.coll1").findOne());
+assert.neq(null, mongosA.getCollection("bar.coll2").findOne());
-assert.neq( null, mongosB.getCollection("bar.coll0").findOne() );
-assert.neq( null, mongosB.getCollection("bar.coll1").findOne() );
-assert.neq( null, mongosB.getCollection("bar.coll2").findOne() );
+assert.neq(null, mongosB.getCollection("bar.coll0").findOne());
+assert.neq(null, mongosB.getCollection("bar.coll1").findOne());
+assert.neq(null, mongosB.getCollection("bar.coll2").findOne());
// All collections sane
-assert.eq( 2, realCollectionCount( new Mongo( barPrimaryShard.host ).getDB( "bar" ) ) );
-assert.eq( 1, realCollectionCount( new Mongo( barOtherShard.host ).getDB( "bar" ) ) );
+assert.eq(2, realCollectionCount(new Mongo(barPrimaryShard.host).getDB("bar")));
+assert.eq(1, realCollectionCount(new Mongo(barOtherShard.host).getDB("bar")));
st.stop();
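
After movePrimary only the unsharded collection follows the primary; sharded collections stay wherever their chunks are, which is what the realCollectionCount checks confirm. Restating the check for the foo database with the reasoning attached (same variables as the test):

    // 'foo' moved to fooOtherShard, but only the unsharded coll0 went with it; the
    // sharded coll1 and coll2 keep their chunks on the original primary shard.
    assert.eq(2, realCollectionCount(new Mongo(fooPrimaryShard.host).getDB("foo")));  // coll1, coll2
    assert.eq(1, realCollectionCount(new Mongo(fooOtherShard.host).getDB("foo")));    // coll0
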
diff --git a/jstests/sharding/mrShardedOutput.js b/jstests/sharding/mrShardedOutput.js
index a4dac1db3d2..ab3bae28d74 100644
--- a/jstests/sharding/mrShardedOutput.js
+++ b/jstests/sharding/mrShardedOutput.js
@@ -4,17 +4,21 @@
// collection input twice the size of the first and outputs it to the new sharded
// collection created in the first pass.
-var st = new ShardingTest({ shards: 2, other: { chunkSize: 1 }});
+var st = new ShardingTest({shards: 2, other: {chunkSize: 1}});
var config = st.getDB("config");
-st.adminCommand( { enablesharding: "test" } );
-st.getDB("admin").runCommand( { movePrimary: "test", to: "shard0001"});
-st.adminCommand( { shardcollection: "test.foo", key: { "a": 1 } } );
+st.adminCommand({enablesharding: "test"});
+st.getDB("admin").runCommand({movePrimary: "test", to: "shard0001"});
+st.adminCommand({shardcollection: "test.foo", key: {"a": 1}});
-var testDB = st.getDB( "test" );
+var testDB = st.getDB("test");
-function map2() { emit(this.i, { count: 1, y: this.y }); }
-function reduce2(key, values) { return values[0]; }
+function map2() {
+ emit(this.i, {count: 1, y: this.y});
+}
+function reduce2(key, values) {
+ return values[0];
+}
var numDocs = 0;
var numBatch = 5000;
@@ -24,17 +28,17 @@ var str = new Array(1024).join('a');
// M/R is strange in that it chooses the output shards based on currently sharded
// collections in the database. The upshot is that we need a sharded collection on
// both shards in order to ensure M/R will output to two shards.
-st.adminCommand({ split: 'test.foo', middle: { a: numDocs + numBatch / 2 }});
-st.adminCommand({ moveChunk: 'test.foo', find: { a: numDocs }, to: 'shard0000' });
+st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
// Add some more data for input so that chunks will get split further
for (var splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({ split: 'test.foo', middle: { a: splitPoint }});
+ testDB.adminCommand({split: 'test.foo', middle: {a: splitPoint}});
}
var bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
assert.writeOK(bulk.execute());
@@ -46,55 +50,58 @@ var res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sh
jsTest.log("MapReduce results:" + tojson(res));
var reduceOutputCount = res.counts.output;
-assert.eq(numDocs, reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount +
- ", should be " + numDocs);
+assert.eq(numDocs,
+ reduceOutputCount,
+ "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
jsTest.log("Checking that all MapReduce output documents are in output collection");
var outColl = testDB["mrShardedOut"];
var outCollCount = outColl.find().itcount();
-assert.eq(numDocs, outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount +
- ", should be " + numDocs +
- ": this may happen intermittently until resolution of SERVER-3627");
+assert.eq(numDocs,
+ outCollCount,
+ "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
+ numDocs + ": this may happen intermittently until resolution of SERVER-3627");
// Make sure it's sharded and split
var newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks, 1,
+assert.gt(newNumChunks,
+ 1,
"Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
// Check that there are no "jumbo" chunks.
var objSize = Object.bsonsize(testDB.mrShardedOut.findOne());
-var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
+var docsPerChunk = 1024 * 1024 / objSize * 1.1; // 1MB chunk size + allowance
st.printShardingStatus(true);
-config.chunks.find({ ns: testDB.mrShardedOut.getFullName() }).forEach(function(chunkDoc) {
- var count = testDB.mrShardedOut.find({ _id: { $gte: chunkDoc.min._id,
- $lt: chunkDoc.max._id }}).itcount();
- assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
-});
+config.chunks.find({ns: testDB.mrShardedOut.getFullName()})
+ .forEach(function(chunkDoc) {
+ var count =
+ testDB.mrShardedOut.find({_id: {$gte: chunkDoc.min._id, $lt: chunkDoc.max._id}})
+ .itcount();
+ assert.lte(count, docsPerChunk, 'Chunk has too many docs: ' + tojson(chunkDoc));
+ });
// Check that chunks for the newly created sharded output collection are well distributed.
-var shard0Chunks = config.chunks.find({ ns: testDB.mrShardedOut._fullName,
- shard: 'shard0000' }).count();
-var shard1Chunks = config.chunks.find({ ns: testDB.mrShardedOut._fullName,
- shard: 'shard0001' }).count();
+var shard0Chunks =
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0000'}).count();
+var shard1Chunks =
+ config.chunks.find({ns: testDB.mrShardedOut._fullName, shard: 'shard0001'}).count();
assert.lte(Math.abs(shard0Chunks - shard1Chunks), 1);
jsTest.log('Starting second pass');
-st.adminCommand({ split: 'test.foo', middle: { a: numDocs + numBatch / 2 }});
-st.adminCommand({ moveChunk: 'test.foo', find: { a: numDocs }, to: 'shard0000' });
+st.adminCommand({split: 'test.foo', middle: {a: numDocs + numBatch / 2}});
+st.adminCommand({moveChunk: 'test.foo', find: {a: numDocs}, to: 'shard0000'});
// Add some more data for input so that chunks will get split further
for (splitPoint = 0; splitPoint < numBatch; splitPoint += 400) {
- testDB.adminCommand({ split: 'test.foo', middle: { a: numDocs + splitPoint }});
+ testDB.adminCommand({split: 'test.foo', middle: {a: numDocs + splitPoint}});
}
bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numBatch; ++i) {
- bulk.insert({ a: numDocs + i, y: str, i: numDocs + i });
+ bulk.insert({a: numDocs + i, y: str, i: numDocs + i});
}
assert.writeOK(bulk.execute());
jsTest.log("No errors on insert batch.");
@@ -106,21 +113,22 @@ res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharde
jsTest.log("MapReduce results:" + tojson(res));
reduceOutputCount = res.counts.output;
-assert.eq(numDocs, reduceOutputCount,
- "MapReduce FAILED: res.counts.output = " + reduceOutputCount +
- ", should be " + numDocs);
+assert.eq(numDocs,
+ reduceOutputCount,
+ "MapReduce FAILED: res.counts.output = " + reduceOutputCount + ", should be " + numDocs);
jsTest.log("Checking that all MapReduce output documents are in output collection");
outColl = testDB["mrShardedOut"];
outCollCount = outColl.find().itcount();
-assert.eq(numDocs, outCollCount,
- "MapReduce FAILED: outColl.find().itcount() = " + outCollCount +
- ", should be " + numDocs +
- ": this may happen intermittently until resolution of SERVER-3627");
+assert.eq(numDocs,
+ outCollCount,
+ "MapReduce FAILED: outColl.find().itcount() = " + outCollCount + ", should be " +
+ numDocs + ": this may happen intermittently until resolution of SERVER-3627");
// Make sure it's sharded and split
newNumChunks = config.chunks.count({ns: testDB.mrShardedOut._fullName});
-assert.gt(newNumChunks, 1,
+assert.gt(newNumChunks,
+ 1,
"Sharding FAILURE: " + testDB.mrShardedOut._fullName + " has only 1 chunk");
st.printShardingStatus(true);
@@ -138,4 +146,3 @@ config.chunks.find({ ns: testDB.mrShardedOut.getFullName() }).forEach(function(c
// to balance chunks.
st.stop();
-
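
The map/reduce test runs two passes over test.foo, each time replacing the sharded output collection mrShardedOut and then checking the output count, the number of chunks, the per-chunk document counts against a 1MB chunk-size allowance, and the chunk balance between the two shards. The output invocation itself, assuming testDB, map2 and reduce2 as defined in the test:

    // Replace the sharded output collection with the result of this pass, then make
    // sure every emitted document actually landed in it.
    var res = testDB.foo.mapReduce(map2, reduce2, {out: {replace: "mrShardedOut", sharded: true}});
    jsTest.log("MapReduce results:" + tojson(res));
    assert.eq(res.counts.output, testDB.mrShardedOut.find().itcount());
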
diff --git a/jstests/sharding/mrShardedOutputAuth.js b/jstests/sharding/mrShardedOutputAuth.js
index ed88e34aeed..acbb01f6794 100644
--- a/jstests/sharding/mrShardedOutputAuth.js
+++ b/jstests/sharding/mrShardedOutputAuth.js
@@ -6,91 +6,86 @@
(function() {
-function doMapReduce(connection, outputDb) {
- // clean output db and run m/r
- outputDb.numbers_out.drop();
- printjson(connection.getDB('input').runCommand(
- {
- mapreduce : "numbers",
- map : function() {
- emit(this.num, {count:1});
+ function doMapReduce(connection, outputDb) {
+ // clean output db and run m/r
+ outputDb.numbers_out.drop();
+ printjson(connection.getDB('input').runCommand({
+ mapreduce: "numbers",
+ map: function() {
+ emit(this.num, {count: 1});
},
- reduce : function(k, values) {
+ reduce: function(k, values) {
var result = {};
- values.forEach( function(value) {
+ values.forEach(function(value) {
result.count = 1;
});
return result;
},
- out : {
- merge : "numbers_out",
- sharded : true,
- db : "output"
- },
- verbose : true,
- query : {}
- }
- ));
-}
-
-function assertSuccess(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
- assert( ! configDb.collections.findOne().dropped, "no sharded collections");
-}
-
-function assertFailure(configDb, outputDb) {
- assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
-}
-
-
-var st = new ShardingTest({ name: "mrShardedOutputAuth",
- shards: 1,
- mongos: 1,
- other: { extraOptions : {"keyFile" : "jstests/libs/key1"} } });
-
-// Setup the users to the input, output and admin databases
-var mongos = st.s;
-var adminDb = mongos.getDB("admin");
-adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
-
-var authenticatedConn = new Mongo(mongos.host);
-authenticatedConn.getDB('admin').auth("user", "pass");
-adminDb = authenticatedConn.getDB("admin");
-
-var configDb = authenticatedConn.getDB("config");
-
-var inputDb = authenticatedConn.getDB("input");
-inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
-var outputDb = authenticatedConn.getDB("output");
-outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
-
-// Setup the input db
-inputDb.numbers.drop();
-for (var i = 0; i < 50; i++) {
- inputDb.numbers.insert({ num : i });
-}
-assert.eq(inputDb.numbers.count(), 50);
-
-// Setup a connection authenticated to both input and output db
-var inputOutputAuthConn = new Mongo(mongos.host);
-inputOutputAuthConn.getDB('input').auth("user", "pass");
-inputOutputAuthConn.getDB('output').auth("user", "pass");
-doMapReduce(inputOutputAuthConn, outputDb);
-assertSuccess(configDb, outputDb);
-
-// setup a connection authenticated to only input db
-var inputAuthConn = new Mongo(mongos.host);
-inputAuthConn.getDB('input').auth("user", "pass");
-doMapReduce(inputAuthConn, outputDb);
-assertFailure(configDb, outputDb);
-
-// setup a connection authenticated to only output db
-var outputAuthConn = new Mongo(mongos.host);
-outputAuthConn.getDB('output').auth("user", "pass");
-doMapReduce(outputAuthConn, outputDb);
-assertFailure(configDb, outputDb);
-
-st.stop();
+ out: {merge: "numbers_out", sharded: true, db: "output"},
+ verbose: true,
+ query: {}
+ }));
+ }
+
+ function assertSuccess(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 50, "map/reduce failed");
+ assert(!configDb.collections.findOne().dropped, "no sharded collections");
+ }
+
+ function assertFailure(configDb, outputDb) {
+ assert.eq(outputDb.numbers_out.count(), 0, "map/reduce should not have succeeded");
+ }
+
+ var st = new ShardingTest({
+ name: "mrShardedOutputAuth",
+ shards: 1,
+ mongos: 1,
+ other: {extraOptions: {"keyFile": "jstests/libs/key1"}}
+ });
+
+    // Set up users for the input, output and admin databases
+ var mongos = st.s;
+ var adminDb = mongos.getDB("admin");
+ adminDb.createUser({user: "user", pwd: "pass", roles: jsTest.adminUserRoles});
+
+ var authenticatedConn = new Mongo(mongos.host);
+ authenticatedConn.getDB('admin').auth("user", "pass");
+ adminDb = authenticatedConn.getDB("admin");
+
+ var configDb = authenticatedConn.getDB("config");
+
+ var inputDb = authenticatedConn.getDB("input");
+ inputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+ var outputDb = authenticatedConn.getDB("output");
+ outputDb.createUser({user: "user", pwd: "pass", roles: jsTest.basicUserRoles});
+
+ // Setup the input db
+ inputDb.numbers.drop();
+ for (var i = 0; i < 50; i++) {
+ inputDb.numbers.insert({num: i});
+ }
+ assert.eq(inputDb.numbers.count(), 50);
+
+ // Setup a connection authenticated to both input and output db
+ var inputOutputAuthConn = new Mongo(mongos.host);
+ inputOutputAuthConn.getDB('input').auth("user", "pass");
+ inputOutputAuthConn.getDB('output').auth("user", "pass");
+ doMapReduce(inputOutputAuthConn, outputDb);
+ assertSuccess(configDb, outputDb);
+
+ // setup a connection authenticated to only input db
+ var inputAuthConn = new Mongo(mongos.host);
+ inputAuthConn.getDB('input').auth("user", "pass");
+ doMapReduce(inputAuthConn, outputDb);
+ assertFailure(configDb, outputDb);
+
+ // setup a connection authenticated to only output db
+ var outputAuthConn = new Mongo(mongos.host);
+ outputAuthConn.getDB('output').auth("user", "pass");
+ doMapReduce(outputAuthConn, outputDb);
+ assertFailure(configDb, outputDb);
+
+ st.stop();
})();
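[Editorial sketch] The three connections above differ only in which databases they are authenticated against. A minimal sketch, not part of this patch, for inspecting a connection's authentication state before running the map/reduce; it assumes `conn` is one of the Mongo() connections created above:

var status = conn.getDB('admin').runCommand({connectionStatus: 1});
assert.commandWorked(status);
printjson(status.authInfo.authenticatedUsers);      // e.g. [{user: "user", db: "input"}]
printjson(status.authInfo.authenticatedUserRoles);  // roles granted on those databases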
diff --git a/jstests/sharding/mr_and_agg_versioning.js b/jstests/sharding/mr_and_agg_versioning.js
index 0167a23554d..e2d1c6f7869 100644
--- a/jstests/sharding/mr_and_agg_versioning.js
+++ b/jstests/sharding/mr_and_agg_versioning.js
@@ -1,64 +1,65 @@
// Test that map reduce and aggregate properly handle shard versioning.
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({shards: 2, mongos: 3});
+ var st = new ShardingTest({shards: 2, mongos: 3});
-var dbName = jsTest.name();
-var collName = dbName + ".coll";
-var numDocs = 50000;
-var numKeys = 1000;
+ var dbName = jsTest.name();
+ var collName = dbName + ".coll";
+ var numDocs = 50000;
+ var numKeys = 1000;
-st.s.adminCommand({enableSharding: dbName});
-st.ensurePrimaryShard(dbName, 'shard0000');
-st.s.adminCommand({shardCollection: collName, key: {key: 1}});
+ st.s.adminCommand({enableSharding: dbName});
+ st.ensurePrimaryShard(dbName, 'shard0000');
+ st.s.adminCommand({shardCollection: collName, key: {key: 1}});
-// Load chunk data to the stale mongoses before moving a chunk
-var staleMongos1 = st.s1;
-var staleMongos2 = st.s2;
-staleMongos1.getCollection(collName).find().itcount();
-staleMongos2.getCollection(collName).find().itcount();
+ // Load chunk data to the stale mongoses before moving a chunk
+ var staleMongos1 = st.s1;
+ var staleMongos2 = st.s2;
+ staleMongos1.getCollection(collName).find().itcount();
+ staleMongos2.getCollection(collName).find().itcount();
-st.s.adminCommand({split: collName, middle: {key: numKeys/2}});
-st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'});
+ st.s.adminCommand({split: collName, middle: {key: numKeys / 2}});
+ st.s.adminCommand({moveChunk: collName, find: {key: 0}, to: 'shard0001'});
-var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
-for(var i = 0; i < numDocs; i++) {
- bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
-}
-assert.writeOK(bulk.execute());
+ var bulk = st.s.getCollection(collName).initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: (i % numKeys), value: i % numKeys});
+ }
+ assert.writeOK(bulk.execute());
-// Add orphaned documents directly to the shards to ensure they are properly filtered out.
-st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
-st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
+ // Add orphaned documents directly to the shards to ensure they are properly filtered out.
+ st.shard0.getCollection(collName).insert({_id: 0, key: 0, value: 0});
+ st.shard1.getCollection(collName).insert({_id: numDocs, key: numKeys, value: numKeys});
-jsTest.log("Doing mapReduce");
+ jsTest.log("Doing mapReduce");
-var map = function(){ emit( this.key, this.value ); };
-var reduce = function(k, values){
- var total = 0;
- for(var i = 0; i < values.length; i++) {
- total += values[i];
- }
- return total;
-};
-function validateOutput(output) {
- assert.eq(output.length, numKeys, tojson(output));
- for(var i = 0; i < output.length; i++) {
- assert.eq(output[i]._id * (numDocs/numKeys), output[i].value, tojson(output));
+ var map = function() {
+ emit(this.key, this.value);
+ };
+ var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++) {
+ total += values[i];
+ }
+ return total;
+ };
+ function validateOutput(output) {
+ assert.eq(output.length, numKeys, tojson(output));
+ for (var i = 0; i < output.length; i++) {
+ assert.eq(output[i]._id * (numDocs / numKeys), output[i].value, tojson(output));
+ }
}
-}
-var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
-validateOutput(res.results);
+ var res = staleMongos1.getCollection(collName).mapReduce(map, reduce, {out: {inline: 1}});
+ validateOutput(res.results);
-jsTest.log("Doing aggregation");
+ jsTest.log("Doing aggregation");
-res = staleMongos2.getCollection(collName).aggregate([
- {'$group': {_id: "$key", value: {"$sum": "$value"}}},
- {'$sort': {_id: 1}}]);
-validateOutput(res.toArray());
+ res = staleMongos2.getCollection(collName).aggregate(
+ [{'$group': {_id: "$key", value: {"$sum": "$value"}}}, {'$sort': {_id: 1}}]);
+ validateOutput(res.toArray());
-st.stop();
+ st.stop();
})();
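[Editorial sketch] The test relies on the stale mongoses refreshing their chunk metadata when the shards report a version mismatch. A router can also be refreshed explicitly; a minimal sketch, not part of this patch, assuming `staleMongos1` is the connection used above:

assert.commandWorked(staleMongos1.getDB('admin').runCommand({flushRouterConfig: 1}));
// Later reads and writes through this mongos use freshly loaded routing metadata.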
diff --git a/jstests/sharding/mr_noscripting.js b/jstests/sharding/mr_noscripting.js
index a7663d54ccc..a2940d51c43 100644
--- a/jstests/sharding/mr_noscripting.js
+++ b/jstests/sharding/mr_noscripting.js
@@ -1,17 +1,17 @@
var shardOpts = [
- { noscripting: '' },
- { } // just use default params
+ {noscripting: ''},
+ {} // just use default params
];
-var st = new ShardingTest({ shards: shardOpts, other: { nopreallocj: 1 }});
+var st = new ShardingTest({shards: shardOpts, other: {nopreallocj: 1}});
var mongos = st.s;
-st.shardColl('bar', { x: 1 });
+st.shardColl('bar', {x: 1});
var testDB = mongos.getDB('test');
var coll = testDB.bar;
-coll.insert({ x: 1 });
+coll.insert({x: 1});
var map = function() {
emit(this.x, 1);
@@ -21,21 +21,19 @@ var reduce = function(key, values) {
return 1;
};
-var mrResult = testDB.runCommand({ mapreduce: 'bar', map: map, reduce: reduce,
- out: { inline: 1 }});
+var mrResult = testDB.runCommand({mapreduce: 'bar', map: map, reduce: reduce, out: {inline: 1}});
assert.eq(0, mrResult.ok, 'mr result: ' + tojson(mrResult));
// Confirm that mongos did not crash
-assert(testDB.adminCommand({ serverStatus: 1 }).ok);
+assert(testDB.adminCommand({serverStatus: 1}).ok);
// Confirm that the rest of the shards did not crash
-mongos.getDB('config').shards.find().forEach(function (shardDoc){
+mongos.getDB('config').shards.find().forEach(function(shardDoc) {
var shardConn = new Mongo(shardDoc.host);
var adminDB = shardConn.getDB('admin');
- var cmdResult = adminDB.runCommand({ serverStatus: 1 });
+ var cmdResult = adminDB.runCommand({serverStatus: 1});
- assert(cmdResult.ok, 'serverStatus on ' + shardDoc.host +
- ' failed, result: ' + tojson(cmdResult));
+ assert(cmdResult.ok,
+ 'serverStatus on ' + shardDoc.host + ' failed, result: ' + tojson(cmdResult));
});
-
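[Editorial sketch] One shard above is started with scripting disabled, which is why the mapReduce command is expected to fail. A minimal sketch, not part of this patch, for confirming how a shard process was started; it assumes `shardConn` is a direct connection to one of the shards, as in the loop above:

var cmdLine = shardConn.getDB('admin').runCommand({getCmdLineOpts: 1});
assert.commandWorked(cmdLine);
printjson(cmdLine.parsed);  // expected to reflect the noscripting / javascriptEnabled setting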
diff --git a/jstests/sharding/mr_shard_version.js b/jstests/sharding/mr_shard_version.js
index 481feb7f268..fc2f7f02e4b 100644
--- a/jstests/sharding/mr_shard_version.js
+++ b/jstests/sharding/mr_shard_version.js
@@ -1,89 +1,100 @@
// Test for SERVER-4158 (version changes during mapreduce)
(function() {
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-//Stop balancer, since it'll just get in the way of these
-st.stopBalancer();
+    // Stop the balancer, since it'll just get in the way of these migrations
+ st.stopBalancer();
-var coll = st.s.getCollection( jsTest.name() + ".coll" );
+ var coll = st.s.getCollection(jsTest.name() + ".coll");
-var numDocs = 50000;
-var numKeys = 1000;
-var numTests = 3;
+ var numDocs = 50000;
+ var numKeys = 1000;
+ var numTests = 3;
-var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < numDocs; i++ ){
- bulk.insert({ _id: i, key: "" + ( i % numKeys ), value: i % numKeys });
-}
-assert.writeOK(bulk.execute());
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, key: "" + (i % numKeys), value: i % numKeys});
+ }
+ assert.writeOK(bulk.execute());
+
+ assert.eq(numDocs, coll.find().itcount());
+
+ var halfId = coll.find().itcount() / 2;
+
+ // Shard collection in half
+ st.shardColl(coll, {_id: 1}, {_id: halfId});
+
+ st.printShardingStatus();
+
+ jsTest.log("Collection now initialized with keys and values...");
+
+ jsTest.log("Starting migrations...");
+
+ var migrateOp = {
+ op: "command",
+ ns: "admin",
+ command: {moveChunk: "" + coll}
+ };
+
+ var checkMigrate = function() {
+ print("Result of migrate : ");
+ printjson(this);
+ };
+
+ var ops = {};
+ for (var i = 0; i < st._connections.length; i++) {
+ for (var j = 0; j < 2; j++) {
+ ops["" + (i * 2 + j)] = {
+ op: "command",
+ ns: "admin",
+ command: {
+ moveChunk: "" + coll,
+ find: {_id: (j == 0 ? 0 : halfId)},
+ to: st._connections[i].shardName
+ },
+ check: checkMigrate
+ };
+ }
+ }
-assert.eq( numDocs, coll.find().itcount() );
+ var bid = benchStart({ops: ops, host: st.s.host, parallel: 1, handleErrors: false});
-var halfId = coll.find().itcount() / 2;
+ jsTest.log("Starting m/r...");
-// Shard collection in half
-st.shardColl( coll, { _id : 1 }, { _id : halfId } );
+ var map = function() {
+ emit(this.key, this.value);
+ };
+ var reduce = function(k, values) {
+ var total = 0;
+ for (var i = 0; i < values.length; i++)
+ total += values[i];
+ return total;
+ };
-st.printShardingStatus();
+ var outputColl = st.s.getCollection(jsTest.name() + ".mrOutput");
-jsTest.log( "Collection now initialized with keys and values..." );
+ jsTest.log("Output coll : " + outputColl);
-jsTest.log( "Starting migrations..." );
+ for (var t = 0; t < numTests; t++) {
+ var results = coll.mapReduce(map, reduce, {out: {replace: outputColl.getName()}});
-var migrateOp = { op : "command", ns : "admin", command : { moveChunk : "" + coll } };
+ // Assert that the results are actually correct, all keys have values of (numDocs / numKeys)
+ // x key
+ var output = outputColl.find().sort({_id: 1}).toArray();
-var checkMigrate = function(){ print( "Result of migrate : " ); printjson( this ); };
+ // printjson( output )
-var ops = {};
-for( var i = 0; i < st._connections.length; i++ ){
- for( var j = 0; j < 2; j++ ){
- ops[ "" + (i * 2 + j) ] = { op : "command", ns : "admin",
- command : { moveChunk : "" + coll,
- find : { _id : ( j == 0 ? 0 : halfId ) },
- to : st._connections[i].shardName },
- check : checkMigrate };
+ assert.eq(output.length, numKeys);
+ printjson(output);
+ for (var i = 0; i < output.length; i++)
+ assert.eq(parseInt(output[i]._id) * (numDocs / numKeys), output[i].value);
}
-}
-
-var bid = benchStart({ ops : ops,
- host : st.s.host,
- parallel : 1,
- handleErrors : false });
-
-jsTest.log( "Starting m/r..." );
-
-var map = function(){ emit( this.key, this.value ); };
-var reduce = function(k, values){
- var total = 0;
- for( var i = 0; i < values.length; i++ ) total += values[i];
- return total;
-};
-
-var outputColl = st.s.getCollection( jsTest.name() + ".mrOutput" );
-
-jsTest.log( "Output coll : " + outputColl );
-
-for( var t = 0; t < numTests; t++ ){
-
- var results = coll.mapReduce( map, reduce, { out : { replace : outputColl.getName() } });
-
- // Assert that the results are actually correct, all keys have values of (numDocs / numKeys) x key
- var output = outputColl.find().sort({ _id : 1 }).toArray();
-
- // printjson( output )
-
- assert.eq( output.length, numKeys );
- printjson( output );
- for( var i = 0; i < output.length; i++ )
- assert.eq( parseInt( output[i]._id ) * ( numDocs / numKeys ), output[i].value );
-
-}
-
-jsTest.log( "Finishing parallel migrations..." );
-
-printjson( benchFinish( bid ) );
-
-st.stop();
+
+ jsTest.log("Finishing parallel migrations...");
+
+ printjson(benchFinish(bid));
+
+ st.stop();
})();
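[Editorial sketch] benchStart()/benchFinish() run the migration commands in the background while the map/reduce loop executes in the foreground. A minimal sketch of that pattern in isolation, not part of this patch, assuming `st` is a ShardingTest and `coll` a sharded collection:

var bid = benchStart({
    ops: [{op: "findOne", ns: coll.getFullName(), query: {_id: 0}}],
    host: st.s.host,
    parallel: 1
});
// ... foreground work (e.g. the mapReduce calls) happens here ...
printjson(benchFinish(bid));  // throughput/latency summary for the background ops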
diff --git a/jstests/sharding/multi_coll_drop.js b/jstests/sharding/multi_coll_drop.js
index c4c2362bf44..96d939e7b99 100644
--- a/jstests/sharding/multi_coll_drop.js
+++ b/jstests/sharding/multi_coll_drop.js
@@ -1,46 +1,46 @@
// Tests the dropping and re-adding of a collection
(function() {
-var st = new ShardingTest({ name: "multidrop", shards: 1, mongos: 2 });
+ var st = new ShardingTest({name: "multidrop", shards: 1, mongos: 2});
-var mA = st.s0;
-var mB = st.s1;
+ var mA = st.s0;
+ var mB = st.s1;
-var coll = mA.getCollection('multidrop.coll');
-var collB = mB.getCollection('multidrop.coll');
+ var coll = mA.getCollection('multidrop.coll');
+ var collB = mB.getCollection('multidrop.coll');
-jsTestLog( "Shard and split collection..." );
+ jsTestLog("Shard and split collection...");
-var admin = mA.getDB( "admin" );
-admin.runCommand({ enableSharding : coll.getDB() + "" });
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
+ var admin = mA.getDB("admin");
+ admin.runCommand({enableSharding: coll.getDB() + ""});
+ admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
-for( var i = -100; i < 100; i++ ){
- admin.runCommand({ split : coll + "", middle : { _id : i } });
-}
+ for (var i = -100; i < 100; i++) {
+ admin.runCommand({split: coll + "", middle: {_id: i}});
+ }
-jsTestLog( "Create versioned connection for each mongos..." );
+ jsTestLog("Create versioned connection for each mongos...");
-coll.find().itcount();
-collB.find().itcount();
+ coll.find().itcount();
+ collB.find().itcount();
-jsTestLog( "Dropping sharded collection..." );
-coll.drop();
+ jsTestLog("Dropping sharded collection...");
+ coll.drop();
-jsTestLog( "Recreating collection..." );
+ jsTestLog("Recreating collection...");
-admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } });
-for( var i = -10; i < 10; i++ ){
- admin.runCommand({ split : coll + "", middle : { _id : i } });
-}
+ admin.runCommand({shardCollection: coll + "", key: {_id: 1}});
+ for (var i = -10; i < 10; i++) {
+ admin.runCommand({split: coll + "", middle: {_id: i}});
+ }
-jsTestLog( "Retrying connections..." );
+ jsTestLog("Retrying connections...");
-coll.find().itcount();
-collB.find().itcount();
+ coll.find().itcount();
+ collB.find().itcount();
-jsTestLog( "Done." );
+ jsTestLog("Done.");
-st.stop();
+ st.stop();
})();
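[Editorial sketch] Dropping and re-sharding the collection gives it a new epoch, which is what forces the second mongos to discard its cached routing information. A minimal sketch for observing that, not part of this patch; it assumes `mA` is the first mongos connection above and that config.collections stores the epoch in a `lastmodEpoch` field, as in this version's config schema:

var configColls = mA.getDB('config').collections;
var before = configColls.findOne({_id: 'multidrop.coll'});
// ... drop and re-shard the collection as above ...
var after = configColls.findOne({_id: 'multidrop.coll'});
print('epoch changed: ' + (tojson(before.lastmodEpoch) != tojson(after.lastmodEpoch)));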
diff --git a/jstests/sharding/multi_mongos2.js b/jstests/sharding/multi_mongos2.js
index 43be2ecd9da..9184ce9e807 100644
--- a/jstests/sharding/multi_mongos2.js
+++ b/jstests/sharding/multi_mongos2.js
@@ -1,73 +1,78 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
-var s1 = new ShardingTest({ name: "multi_mongos1", shards: 2, mongos: 2 });
-s2 = s1._mongos[1];
+ var s1 = new ShardingTest({name: "multi_mongos1", shards: 2, mongos: 2});
+ s2 = s1._mongos[1];
-s1.adminCommand( { enablesharding : "test" } );
-s1.ensurePrimaryShard('test', 'shard0001');
-s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s1.adminCommand({enablesharding: "test"});
+ s1.ensurePrimaryShard('test', 'shard0001');
+ s1.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-s1.config.databases.find().forEach( printjson );
+ s1.config.databases.find().forEach(printjson);
-// test queries
+ // test queries
-s1.getDB('test').existing.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+ s1.getDB('test').existing.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-// Balancer is by default stopped, thus it will not interfere with manual chunk moves.
+ // Balancer is by default stopped, thus it will not interfere with manual chunk moves.
-s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
-assert.commandWorked(s2.adminCommand({ split: "test.existing", middle: { _id: 5 }}));
+ s2.adminCommand({shardcollection: "test.existing", key: {_id: 1}});
+ assert.commandWorked(s2.adminCommand({split: "test.existing", middle: {_id: 5}}));
-res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing" ,
- find : { _id : 1 } ,
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name } );
+ res = s2.getDB("admin").runCommand({
+ moveChunk: "test.existing",
+ find: {_id: 1},
+ to: s1.getOther(s1.getPrimaryShard("test")).name
+ });
-assert.eq(1 , res.ok, tojson(res));
+ assert.eq(1, res.ok, tojson(res));
-s1.startBalancer();
+ s1.startBalancer();
-printjson( s2.adminCommand( {"getShardVersion" : "test.existing" } ) );
-printjson( new Mongo(s1.getPrimaryShard( "test" ).name).getDB( "admin" )
- .adminCommand( {"getShardVersion" : "test.existing" } ) );
+ printjson(s2.adminCommand({"getShardVersion": "test.existing"}));
+ printjson(new Mongo(s1.getPrimaryShard("test").name)
+ .getDB("admin")
+ .adminCommand({"getShardVersion": "test.existing"}));
-assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1})); // SERVER-2828
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-// test stats
+ // test stats
-s1.getDB('test').existing2.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing2.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing2.count({_id:1}));
+ s1.getDB('test').existing2.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing2.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing2.count({_id: 1}));
-s2.adminCommand( { shardcollection : "test.existing2" , key : { _id : 1 } } );
-assert.commandWorked(s2.adminCommand({ split: "test.existing2", middle: { _id: 5 }}));
+ s2.adminCommand({shardcollection: "test.existing2", key: {_id: 1}});
+ assert.commandWorked(s2.adminCommand({split: "test.existing2", middle: {_id: 5}}));
-var res = s1.getDB('test').existing2.stats();
-printjson( res );
-assert.eq(true, res.sharded); //SERVER-2828
-assert.eq(true, s2.getDB('test').existing2.stats().sharded);
+ var res = s1.getDB('test').existing2.stats();
+ printjson(res);
+ assert.eq(true, res.sharded); // SERVER-2828
+ assert.eq(true, s2.getDB('test').existing2.stats().sharded);
-// test admin commands
+ // test admin commands
-s1.getDB('test').existing3.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing3.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing3.count({_id:1}));
+ s1.getDB('test').existing3.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing3.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing3.count({_id: 1}));
-s1.stopBalancer();
+ s1.stopBalancer();
-s2.adminCommand( { shardcollection : "test.existing3" , key : { _id : 1 } } );
-assert.commandWorked(s2.adminCommand({ split: "test.existing3", middle: { _id: 5 }}));
+ s2.adminCommand({shardcollection: "test.existing3", key: {_id: 1}});
+ assert.commandWorked(s2.adminCommand({split: "test.existing3", middle: {_id: 5}}));
-res = s1.getDB( "admin" ).runCommand( { moveChunk: "test.existing3",
- find : { _id : 1 },
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name } );
-assert.eq(1 , res.ok, tojson(res));
+ res = s1.getDB("admin").runCommand({
+ moveChunk: "test.existing3",
+ find: {_id: 1},
+ to: s1.getOther(s1.getPrimaryShard("test")).name
+ });
+ assert.eq(1, res.ok, tojson(res));
-s1.startBalancer();
+ s1.startBalancer();
-s1.stop();
+ s1.stop();
})();
diff --git a/jstests/sharding/multi_mongos2a.js b/jstests/sharding/multi_mongos2a.js
index 3dea44fc4c5..829ce0de194 100644
--- a/jstests/sharding/multi_mongos2a.js
+++ b/jstests/sharding/multi_mongos2a.js
@@ -1,36 +1,35 @@
// This tests sharding an existing collection that both shards are aware of (SERVER-2828)
(function() {
-var s1 = new ShardingTest({ name: "multi_mongos2a",
- shards: 2,
- mongos: 2 });
-s2 = s1._mongos[1];
+ var s1 = new ShardingTest({name: "multi_mongos2a", shards: 2, mongos: 2});
+ s2 = s1._mongos[1];
-s1.adminCommand( { enablesharding : "test" } );
-s1.ensurePrimaryShard('test', 'shard0001');
-s1.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+ s1.adminCommand({enablesharding: "test"});
+ s1.ensurePrimaryShard('test', 'shard0001');
+ s1.adminCommand({shardcollection: "test.foo", key: {num: 1}});
-s1.config.databases.find().forEach( printjson );
+ s1.config.databases.find().forEach(printjson);
-s1.getDB('test').existing.insert({_id:1});
-assert.eq(1, s1.getDB('test').existing.count({_id:1}));
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
+ s1.getDB('test').existing.insert({_id: 1});
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1}));
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-s2.adminCommand( { shardcollection : "test.existing" , key : { _id : 1 } } );
-assert.eq(true, s2.getDB('test').existing.stats().sharded);
+ s2.adminCommand({shardcollection: "test.existing", key: {_id: 1}});
+ assert.eq(true, s2.getDB('test').existing.stats().sharded);
+ res = s2.getDB("admin").runCommand({
+ moveChunk: "test.existing",
+ find: {_id: 1},
+ to: s1.getOther(s1.getPrimaryShard("test")).name
+ });
-res = s2.getDB( "admin" ).runCommand( { moveChunk: "test.existing",
- find : { _id : 1 },
- to : s1.getOther( s1.getPrimaryShard( "test" ) ).name } );
+ assert.eq(1, res.ok, tojson(res));
-assert.eq(1 , res.ok, tojson(res));
+ s1.adminCommand({flushRouterConfig: 1});
-s1.adminCommand( { flushRouterConfig : 1 } );
+ assert.eq(1, s1.getDB('test').existing.count({_id: 1})); // SERVER-2828
+ assert.eq(1, s2.getDB('test').existing.count({_id: 1}));
-assert.eq(1, s1.getDB('test').existing.count({_id:1})); // SERVER-2828
-assert.eq(1, s2.getDB('test').existing.count({_id:1}));
-
-s1.stop();
+ s1.stop();
})();
diff --git a/jstests/sharding/multi_write_target.js b/jstests/sharding/multi_write_target.js
index 3de867ed5ea..9c4f37430da 100644
--- a/jstests/sharding/multi_write_target.js
+++ b/jstests/sharding/multi_write_target.js
@@ -2,72 +2,72 @@
// Tests that multi-writes (update/delete) target *all* shards and not just shards in the collection
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 3, mongos: 2 });
+ var st = new ShardingTest({shards: 3, mongos: 2});
-var admin = st.s0.getDB( "admin" );
-var coll = st.s0.getCollection( "foo.bar" );
+ var admin = st.s0.getDB("admin");
+ var coll = st.s0.getCollection("foo.bar");
-assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
-assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { skey: 1 } }));
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", st.shard0.shardName);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {skey: 1}}));
-assert.commandWorked(admin.runCommand({ split: coll + "", middle: { skey: 0 } }));
-assert.commandWorked(admin.runCommand({ split: coll + "", middle: { skey: 100 } }));
-assert.commandWorked(
- admin.runCommand({ moveChunk: coll + "", find: { skey: 0 }, to: st.shard1.shardName }));
-assert.commandWorked(
- admin.runCommand({ moveChunk: coll + "", find: { skey: 100 }, to: st.shard2.shardName }));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 0}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {skey: 100}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 0}, to: st.shard1.shardName}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: coll + "", find: {skey: 100}, to: st.shard2.shardName}));
-jsTest.log("Testing multi-update...");
+ jsTest.log("Testing multi-update...");
-// Put data on all shards
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 0, skey: -1, x: 1 }));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 1, skey: 1, x: 1 }));
-assert.writeOK(st.s0.getCollection(coll.toString()).insert({ _id: 0, skey: 100, x: 1 }));
+ // Put data on all shards
+ assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+ assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+ assert.writeOK(st.s0.getCollection(coll.toString()).insert({_id: 0, skey: 100, x: 1}));
-// Non-multi-update doesn't work without shard key
-assert.writeError(coll.update({ x: 1 }, { $set: { updated: true } }, { multi: false }));
-assert.writeOK(coll.update({ x: 1 }, { $set: { updated: true } }, { multi: true }));
+ // Non-multi-update doesn't work without shard key
+ assert.writeError(coll.update({x: 1}, {$set: {updated: true}}, {multi: false}));
+ assert.writeOK(coll.update({x: 1}, {$set: {updated: true}}, {multi: true}));
-// Ensure update goes to *all* shards
-assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updated: true }));
-assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({ updated: true }));
-assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updated: true }));
+ // Ensure update goes to *all* shards
+ assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updated: true}));
+ assert.neq(null, st.shard1.getCollection(coll.toString()).findOne({updated: true}));
+ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updated: true}));
-// _id update works, and goes to all shards even on the stale mongos
-var staleColl = st.s1.getCollection('foo.bar');
-assert.writeOK(staleColl.update({ _id: 0 }, { $set: { updatedById: true } }, { multi: false }));
+ // _id update works, and goes to all shards even on the stale mongos
+ var staleColl = st.s1.getCollection('foo.bar');
+ assert.writeOK(staleColl.update({_id: 0}, {$set: {updatedById: true}}, {multi: false}));
-// Ensure _id update goes to *all* shards
-assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({ updatedById: true }));
-assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({ updatedById: true }));
+ // Ensure _id update goes to *all* shards
+ assert.neq(null, st.shard0.getCollection(coll.toString()).findOne({updatedById: true}));
+ assert.neq(null, st.shard2.getCollection(coll.toString()).findOne({updatedById: true}));
-jsTest.log("Testing multi-delete...");
+ jsTest.log("Testing multi-delete...");
-// non-multi-delete doesn't work without shard key
-assert.writeError(coll.remove({ x: 1 }, { justOne: true }));
+ // non-multi-delete doesn't work without shard key
+ assert.writeError(coll.remove({x: 1}, {justOne: true}));
-assert.writeOK(coll.remove({ x: 1 }, { justOne: false }));
+ assert.writeOK(coll.remove({x: 1}, {justOne: false}));
-// Ensure delete goes to *all* shards
-assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x: 1 }));
-assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({ x: 1 }));
-assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x: 1 }));
+ // Ensure delete goes to *all* shards
+ assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+ assert.eq(null, st.shard1.getCollection(coll.toString()).findOne({x: 1}));
+ assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
-// Put more on all shards
-assert.writeOK(st.shard0.getCollection(coll.toString()).insert({ _id: 0, skey: -1, x: 1 }));
-assert.writeOK(st.shard1.getCollection(coll.toString()).insert({ _id: 1, skey: 1, x: 1 }));
-// Data not in chunks
-assert.writeOK(st.shard2.getCollection(coll.toString()).insert({ _id: 0, x: 1 }));
+ // Put more on all shards
+ assert.writeOK(st.shard0.getCollection(coll.toString()).insert({_id: 0, skey: -1, x: 1}));
+ assert.writeOK(st.shard1.getCollection(coll.toString()).insert({_id: 1, skey: 1, x: 1}));
+ // Data not in chunks
+ assert.writeOK(st.shard2.getCollection(coll.toString()).insert({_id: 0, x: 1}));
-assert.writeOK(coll.remove({ _id: 0 }, { justOne: true }));
+ assert.writeOK(coll.remove({_id: 0}, {justOne: true}));
-// Ensure _id delete goes to *all* shards
-assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({ x: 1 }));
-assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({ x: 1 }));
+ // Ensure _id delete goes to *all* shards
+ assert.eq(null, st.shard0.getCollection(coll.toString()).findOne({x: 1}));
+ assert.eq(null, st.shard2.getCollection(coll.toString()).findOne({x: 1}));
-st.stop();
+ st.stop();
})();
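[Editorial sketch] The writeError assertions above show that single (non-multi) writes must be targetable. The complementary case, not part of this patch: a single write whose query contains the full shard key (or _id) is routed to exactly one shard. A minimal sketch assuming `coll` is the sharded collection above (shard key {skey: 1}):

assert.writeOK(coll.update({skey: 1}, {$set: {targeted: true}}, {multi: false}));
assert.writeOK(coll.remove({skey: 100}, {justOne: true}));
// Both writes are sent only to the shard owning the chunk that contains that skey value.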
diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js
index 28612681e46..33a337e5656 100644
--- a/jstests/sharding/names.js
+++ b/jstests/sharding/names.js
@@ -1,50 +1,52 @@
// Test that having replica set names the same as the names of other shards works fine
(function() {
-var st = new ShardingTest({ name: "HostNames",
- shards: 0,
- mongos: 2,
- other: { rs : true } });
+ var st = new ShardingTest({name: "HostNames", shards: 0, mongos: 2, other: {rs: true}});
-var rsA = new ReplSetTest({ nodes : 2, name : "rsA" });
-var rsB = new ReplSetTest({ nodes : 2, name : "rsB" });
+ var rsA = new ReplSetTest({nodes: 2, name: "rsA"});
+ var rsB = new ReplSetTest({nodes: 2, name: "rsB"});
-rsA.startSet();
-rsB.startSet();
-rsA.initiate();
-rsB.initiate();
-rsA.getPrimary();
-rsB.getPrimary();
+ rsA.startSet();
+ rsB.startSet();
+ rsA.initiate();
+ rsB.initiate();
+ rsA.getPrimary();
+ rsB.getPrimary();
-var mongos = st.s;
-var config = mongos.getDB("config");
-var admin = mongos.getDB("admin");
+ var mongos = st.s;
+ var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
-assert( admin.runCommand({ addShard : rsA.getURL(), name : rsB.name }).ok );
-printjson( config.shards.find().toArray() );
+ assert(admin.runCommand({addShard: rsA.getURL(), name: rsB.name}).ok);
+ printjson(config.shards.find().toArray());
-assert( admin.runCommand({ addShard : rsB.getURL(), name : rsA.name }).ok );
-printjson( config.shards.find().toArray() );
+ assert(admin.runCommand({addShard: rsB.getURL(), name: rsA.name}).ok);
+ printjson(config.shards.find().toArray());
-assert.eq(2, config.shards.count(), "Error adding a shard");
-assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA");
-assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB");
+ assert.eq(2, config.shards.count(), "Error adding a shard");
+ assert.eq(
+ rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA");
+ assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB");
-// Remove shard
-assert( admin.runCommand( { removeshard: rsA.name } ).ok , "failed to start draining shard" );
-assert( admin.runCommand( { removeshard: rsA.name } ).ok , "failed to remove shard" );
+ // Remove shard
+ assert(admin.runCommand({removeshard: rsA.name}).ok, "failed to start draining shard");
+ assert(admin.runCommand({removeshard: rsA.name}).ok, "failed to remove shard");
-assert.eq(1, config.shards.count(), "Error removing a shard");
-assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB 2");
+ assert.eq(1, config.shards.count(), "Error removing a shard");
+ assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 2");
-// Re-add shard
-assert( admin.runCommand({ addShard : rsB.getURL(), name : rsA.name }).ok );
-printjson( config.shards.find().toArray() );
+ // Re-add shard
+ assert(admin.runCommand({addShard: rsB.getURL(), name: rsA.name}).ok);
+ printjson(config.shards.find().toArray());
-assert.eq(2, config.shards.count(), "Error re-adding a shard");
-assert.eq(rsB.getURL(), config.shards.findOne({_id:rsA.name})["host"], "Wrong host for shard rsA 3");
-assert.eq(rsA.getURL(), config.shards.findOne({_id:rsB.name})["host"], "Wrong host for shard rsB 3");
+ assert.eq(2, config.shards.count(), "Error re-adding a shard");
+ assert.eq(
+ rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3");
+ assert.eq(
+ rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB 3");
-st.stop();
+ st.stop();
})();
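[Editorial sketch] The shard inventory queried from config.shards above can also be read through the listShards admin command. A minimal sketch, not part of this patch, assuming `admin` is the mongos admin database used above:

var res = admin.runCommand({listShards: 1});
assert.commandWorked(res);
res.shards.forEach(function(s) {
    print(s._id + ' -> ' + s.host);
});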
diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js
index 27c40e9056b..a95ee5924a0 100644
--- a/jstests/sharding/noUpdateButN1inAnotherCollection.js
+++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js
@@ -1,57 +1,57 @@
-function debug( str ) {
- print( "---\n" + str + "\n-----" );
+function debug(str) {
+ print("---\n" + str + "\n-----");
}
var name = "badNonUpdate";
debug("Starting sharded cluster test stuff");
-var s = new ShardingTest({ name: name, shards: 2, mongos : 2 });
+var s = new ShardingTest({name: name, shards: 2, mongos: 2});
-var mongosA=s.s0;
-var mongosB=s.s1;
+var mongosA = s.s0;
+var mongosB = s.s1;
ns = "test.coll";
ns2 = "test.coll2";
-adminSA = mongosA.getDB( "admin" );
-adminSA.runCommand({ enableSharding : "test"});
+adminSA = mongosA.getDB("admin");
+adminSA.runCommand({enableSharding: "test"});
-adminSA.runCommand( { moveprimary : "test", to : "shard0000" } );
-adminSA.runCommand( { moveprimary : "test2", to : "shard0001" } );
+adminSA.runCommand({moveprimary: "test", to: "shard0000"});
+adminSA.runCommand({moveprimary: "test2", to: "shard0001"});
-adminSA.runCommand({ shardCollection : ns, key : { _id : 1 } });
+adminSA.runCommand({shardCollection: ns, key: {_id: 1}});
try {
- s.stopBalancer();
-} catch (e) {
- print("coundn't stop balancer via command");
+ s.stopBalancer();
+} catch (e) {
+ print("coundn't stop balancer via command");
}
-adminSA.settings.update({ _id: 'balancer' }, { $set: { stopped: true }});
+adminSA.settings.update({_id: 'balancer'}, {$set: {stopped: true}});
-var db = mongosA.getDB( "test" );
+var db = mongosA.getDB("test");
var coll = db.coll;
var coll2 = db.coll2;
numDocs = 10;
for (var i = 1; i < numDocs; i++) {
- coll.insert({_id:i, control:0});
- coll2.insert({_id:i, control:0});
+ coll.insert({_id: i, control: 0});
+ coll2.insert({_id: i, control: 0});
}
debug("Inserted docs, now split chunks");
-adminSA.runCommand( { split: ns, find : { _id : 3} });
-adminSA.runCommand( { movechunk: ns, find : { _id : 10}, to: "shard0001" });
+adminSA.runCommand({split: ns, find: {_id: 3}});
+adminSA.runCommand({movechunk: ns, find: {_id: 10}, to: "shard0001"});
var command = 'printjson(db.coll.update({ _id: 9 }, { $set: { a: "9" }}, true));';
// without this first query through mongo, the second time doesn't "fail"
debug("Try query first time");
-runMongoProgram( "mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command );
+runMongoProgram("mongo", "--quiet", "--port", "" + s._mongos[1].port, "--eval", command);
-var res = mongosB.getDB("test").coll2.update({ _id: 0 }, { $set: { c: "333" }});
-assert.eq( 0, res.nModified );
+var res = mongosB.getDB("test").coll2.update({_id: 0}, {$set: {c: "333"}});
+assert.eq(0, res.nModified);
s.stop();
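[Editorial sketch] The final assertion reads nModified from the WriteResult returned by update(). A minimal sketch of the other counters available on that result, not part of this patch, assuming `coll2` is a collection handle like the one used above:

var res = coll2.update({_id: 0}, {$set: {c: '333'}});
print('matched: ' + res.nMatched + ', modified: ' + res.nModified +
      ', upserted: ' + res.nUpserted);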
diff --git a/jstests/sharding/no_empty_reset.js b/jstests/sharding/no_empty_reset.js
index bda63ee9edc..61fe5905cc0 100644
--- a/jstests/sharding/no_empty_reset.js
+++ b/jstests/sharding/no_empty_reset.js
@@ -1,63 +1,64 @@
// Tests that an empty shard can't be the cause of a chunk reset
-var st = new ShardingTest({ shards : 2, mongos : 2 });
+var st = new ShardingTest({shards: 2, mongos: 2});
// Don't balance since we're manually moving chunks
st.stopBalancer();
-var coll = st.s.getCollection( jsTestName() + ".coll" );
+var coll = st.s.getCollection(jsTestName() + ".coll");
-for( var i = -10; i < 10; i++ )
- coll.insert({ _id : i });
-
-st.shardColl( coll, { _id : 1 }, { _id : 0 } );
+for (var i = -10; i < 10; i++)
+ coll.insert({_id: i});
-jsTestLog( "Sharded setup complete" );
+st.shardColl(coll, {_id: 1}, {_id: 0});
+
+jsTestLog("Sharded setup complete");
st.printShardingStatus();
-jsTestLog( "Setting initial versions for each mongos..." );
+jsTestLog("Setting initial versions for each mongos...");
coll.find().itcount();
-var collB = st.s1.getCollection( "" + coll );
+var collB = st.s1.getCollection("" + coll);
collB.find().itcount();
-jsTestLog( "Migrating via first mongos..." );
+jsTestLog("Migrating via first mongos...");
-var fullShard = st.getShard( coll, { _id : 1 } );
-var emptyShard = st.getShard( coll, { _id : -1 } );
+var fullShard = st.getShard(coll, {_id: 1});
+var emptyShard = st.getShard(coll, {_id: -1});
-var admin = st.s.getDB( "admin" );
+var admin = st.s.getDB("admin");
assert.soon(
- function () {
- var result = admin.runCommand( { moveChunk: "" + coll,
- find: { _id: -1 },
- to: fullShard.shardName,
- _waitForDelete: true } );
+ function() {
+ var result = admin.runCommand({
+ moveChunk: "" + coll,
+ find: {_id: -1},
+ to: fullShard.shardName,
+ _waitForDelete: true
+ });
jsTestLog('moveChunk result = ' + tojson(result));
return result.ok;
},
- "Setup FAILURE: Unable to move chunk from " + emptyShard.shardName +
- " to " + fullShard.shardName
-);
+ "Setup FAILURE: Unable to move chunk from " + emptyShard.shardName + " to " +
+ fullShard.shardName);
-jsTestLog( "Resetting shard version via first mongos..." );
+jsTestLog("Resetting shard version via first mongos...");
coll.find().itcount();
-jsTestLog( "Making sure we don't insert into the wrong shard..." );
+jsTestLog("Making sure we don't insert into the wrong shard...");
-collB.insert({ _id : -11 });
+collB.insert({_id: -11});
-var emptyColl = emptyShard.getCollection( "" + coll );
+var emptyColl = emptyShard.getCollection("" + coll);
-print( emptyColl );
-print( emptyShard );
-print( emptyShard.shardName );
+print(emptyColl);
+print(emptyShard);
+print(emptyShard.shardName);
st.printShardingStatus();
-assert.eq( 0, emptyColl.find().itcount() );
+assert.eq(0, emptyColl.find().itcount());
jsTestLog("DONE!");
st.stop();
diff --git a/jstests/sharding/parallel.js b/jstests/sharding/parallel.js
index a05cfa2d396..cc332d65757 100644
--- a/jstests/sharding/parallel.js
+++ b/jstests/sharding/parallel.js
@@ -1,47 +1,57 @@
// This test fails when run with authentication because benchRun with auth is broken: SERVER-6388
(function() {
-"use strict";
-
-var numShards = 3;
-var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-var db = s.getDB( "test" );
-
-var N = 10000;
-
-for (var i=0; i<N; i+=(N/12)) {
- s.adminCommand({split: "test.foo", middle: {_id: i}});
- s.s.getDB('admin').runCommand({moveChunk: "test.foo",
- find: {_id: i},
- to: "shard000" + Math.floor(Math.random() * numShards)});
-}
-
-s.startBalancer();
-
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ )
- bulk.insert({ _id: i });
-assert.writeOK(bulk.execute());
-
-var doCommand = function( dbname , cmd ) {
- x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
- host : db.getMongo().host , parallel : 2 , seconds : 2 } );
- printjson(x);
- x = benchRun( { ops : [ { op : "findOne" , ns : dbname + ".$cmd" , query : cmd } ] ,
- host : s._mongos[1].host , parallel : 2 , seconds : 2 } );
+ "use strict";
+
+ var numShards = 3;
+ var s = new ShardingTest({name: "parallel", shards: numShards, mongos: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+ var db = s.getDB("test");
+
+ var N = 10000;
+
+ for (var i = 0; i < N; i += (N / 12)) {
+ s.adminCommand({split: "test.foo", middle: {_id: i}});
+ s.s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {_id: i},
+ to: "shard000" + Math.floor(Math.random() * numShards)
+ });
+ }
+
+ s.startBalancer();
+
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+
+ var doCommand = function(dbname, cmd) {
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: db.getMongo().host,
+ parallel: 2,
+ seconds: 2
+ });
+ printjson(x);
+ x = benchRun({
+ ops: [{op: "findOne", ns: dbname + ".$cmd", query: cmd}],
+ host: s._mongos[1].host,
+ parallel: 2,
+ seconds: 2
+ });
+ printjson(x);
+ };
+
+ doCommand("test", {dbstats: 1});
+ doCommand("config", {dbstats: 1});
+
+ var x = s.getDB("config").stats();
+ assert(x.ok, tojson(x));
printjson(x);
-};
-
-doCommand( "test" , { dbstats : 1 } );
-doCommand( "config" , { dbstats : 1 } );
-
-var x = s.getDB( "config" ).stats();
-assert( x.ok , tojson(x) );
-printjson(x);
-s.stop();
+ s.stop();
}()); \ No newline at end of file
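[Editorial sketch] doCommand() above wraps benchRun(), which runs a fixed-duration workload synchronously and returns aggregate statistics. A minimal standalone sketch, not part of this patch, assuming `db` is the "test" database and test.foo the sharded collection above:

var stats = benchRun({
    ops: [{op: "findOne", ns: "test.foo", query: {_id: 0}}],
    host: db.getMongo().host,
    parallel: 2,
    seconds: 2
});
printjson(stats);  // per-op counts and overall throughput for the 2-second run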
diff --git a/jstests/sharding/pending_chunk.js b/jstests/sharding/pending_chunk.js
index 3455699d9e0..21107fe370d 100644
--- a/jstests/sharding/pending_chunk.js
+++ b/jstests/sharding/pending_chunk.js
@@ -2,88 +2,77 @@
// Tests pending chunk metadata.
//
-(function() {
-"use strict";
-
-var st = new ShardingTest({ shards: 2, mongos: 2, other: { separateConfig: true } });
-
-var mongos = st.s0;
-var admin = mongos.getDB('admin');
-var shards = mongos.getCollection('config.shards').find().toArray();
-var coll = mongos.getCollection('foo.bar');
-var ns = coll.getFullName();
-var dbName = coll.getDB().getName();
-var shard0 = st.shard0, shard1 = st.shard1;
-
-assert.commandWorked(admin.runCommand({enableSharding: dbName}));
-printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
-assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
-
-jsTest.log('Moving some chunks to shard1...');
-
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
-assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: 0},
- to: shards[1]._id,
- _waitForDelete: true}));
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: 1},
- to: shards[1]._id,
- _waitForDelete: true}));
-
-
-function getMetadata(shard) {
- var admin = shard.getDB('admin'),
- metadata = admin.runCommand({
- getShardVersion: ns, fullMetadata: true
- }).metadata;
-
- jsTest.log('Got metadata: ' + tojson(metadata));
- return metadata;
-}
-
-var metadata = getMetadata(shard1);
-assert.eq(metadata.pending[0][0]._id, 1);
-assert.eq(metadata.pending[0][1]._id, MaxKey);
-
-jsTest.log('Moving some chunks back to shard0 after empty...');
-
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: -1},
- to: shards[1]._id,
- _waitForDelete: true}));
-
-metadata = getMetadata(shard0);
-assert.eq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.pending.length, 0);
-
-assert.commandWorked(admin.runCommand({moveChunk: ns,
- find: {_id: 1},
- to: shards[0]._id,
- _waitForDelete: true}));
-
-metadata = getMetadata(shard0);
-assert.eq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.pending[0][0]._id, 1);
-assert.eq(metadata.pending[0][1]._id, MaxKey);
-
-// The pending chunk should be promoted to a real chunk when shard0 reloads
-// its config.
-jsTest.log('Checking that pending chunk is promoted on reload...');
-
-assert.eq(null, coll.findOne({_id: 1}));
-
-metadata = getMetadata(shard0);
-assert.neq(metadata.shardVersion.t, 0);
-assert.neq(metadata.collVersion.t, 0);
-assert.eq(metadata.chunks[0][0]._id, 1);
-assert.eq(metadata.chunks[0][1]._id, MaxKey);
-
-st.printShardingStatus();
-
-st.stop();
+(function() {
+ "use strict";
+
+ var st = new ShardingTest({shards: 2, mongos: 2, other: {separateConfig: true}});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB('admin');
+ var shards = mongos.getCollection('config.shards').find().toArray();
+ var coll = mongos.getCollection('foo.bar');
+ var ns = coll.getFullName();
+ var dbName = coll.getDB().getName();
+ var shard0 = st.shard0, shard1 = st.shard1;
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ printjson(admin.runCommand({movePrimary: dbName, to: shards[0]._id}));
+ assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
+
+ jsTest.log('Moving some chunks to shard1...');
+
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
+ assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 0}, to: shards[1]._id, _waitForDelete: true}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: shards[1]._id, _waitForDelete: true}));
+
+ function getMetadata(shard) {
+ var admin = shard.getDB('admin'),
+ metadata = admin.runCommand({getShardVersion: ns, fullMetadata: true}).metadata;
+
+ jsTest.log('Got metadata: ' + tojson(metadata));
+ return metadata;
+ }
+
+ var metadata = getMetadata(shard1);
+ assert.eq(metadata.pending[0][0]._id, 1);
+ assert.eq(metadata.pending[0][1]._id, MaxKey);
+
+ jsTest.log('Moving some chunks back to shard0 after empty...');
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: -1}, to: shards[1]._id, _waitForDelete: true}));
+
+ metadata = getMetadata(shard0);
+ assert.eq(metadata.shardVersion.t, 0);
+ assert.neq(metadata.collVersion.t, 0);
+ assert.eq(metadata.pending.length, 0);
+
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: ns, find: {_id: 1}, to: shards[0]._id, _waitForDelete: true}));
+
+ metadata = getMetadata(shard0);
+ assert.eq(metadata.shardVersion.t, 0);
+ assert.neq(metadata.collVersion.t, 0);
+ assert.eq(metadata.pending[0][0]._id, 1);
+ assert.eq(metadata.pending[0][1]._id, MaxKey);
+
+ // The pending chunk should be promoted to a real chunk when shard0 reloads
+ // its config.
+ jsTest.log('Checking that pending chunk is promoted on reload...');
+
+ assert.eq(null, coll.findOne({_id: 1}));
+
+ metadata = getMetadata(shard0);
+ assert.neq(metadata.shardVersion.t, 0);
+ assert.neq(metadata.collVersion.t, 0);
+ assert.eq(metadata.chunks[0][0]._id, 1);
+ assert.eq(metadata.chunks[0][1]._id, MaxKey);
+
+ st.printShardingStatus();
+
+ st.stop();
})();
diff --git a/jstests/sharding/prefix_shard_key.js b/jstests/sharding/prefix_shard_key.js
index 6e906add822..8ac414113df 100644
--- a/jstests/sharding/prefix_shard_key.js
+++ b/jstests/sharding/prefix_shard_key.js
@@ -7,16 +7,16 @@
// Insert docs with same val for 'skey' but different vals for 'extra'.
// Move chunks around and check that [min,max) chunk boundaries are properly obeyed.
-var s = new ShardingTest({ name : jsTestName(), shards : 2 });
+var s = new ShardingTest({name: jsTestName(), shards: 2});
-var db = s.getDB( "test" );
-var admin = s.getDB( "admin" );
-var config = s.getDB( "config" );
+var db = s.getDB("test");
+var admin = s.getDB("admin");
+var config = s.getDB("config");
var shards = config.shards.find().toArray();
-var shard0 = new Mongo( shards[0].host );
-var shard1 = new Mongo( shards[1].host );
+var shard0 = new Mongo(shards[0].host);
+var shard1 = new Mongo(shards[1].host);
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
//******************Part 1********************
@@ -24,67 +24,75 @@ s.ensurePrimaryShard('test', 'shard0001');
var coll = db.foo;
var longStr = 'a';
-while ( longStr.length < 1024 * 128 ) { longStr += longStr; }
+while (longStr.length < 1024 * 128) {
+ longStr += longStr;
+}
var bulk = coll.initializeUnorderedBulkOp();
-for( i=0 ; i<100; i++){
- bulk.insert({ num: i, str: longStr });
- bulk.insert({ num: i+100, x: i, str: longStr });
+for (i = 0; i < 100; i++) {
+ bulk.insert({num: i, str: longStr});
+ bulk.insert({num: i + 100, x: i, str: longStr});
}
assert.writeOK(bulk.execute());
-//no usable index yet, should throw
-assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ); } );
+// no usable index yet, should throw
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
-//create usable index
-assert.commandWorked(coll.ensureIndex({ num: 1, x: 1 }));
+// create usable index
+assert.commandWorked(coll.ensureIndex({num: 1, x: 1}));
-//usable index, but doc with empty 'num' value, so still should throw
-assert.writeOK(coll.insert({ x: -5 }));
-assert.throws( function(){ s.adminCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } ); } );
+// usable index, but doc with empty 'num' value, so still should throw
+assert.writeOK(coll.insert({x: -5}));
+assert.throws(function() {
+ s.adminCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+});
-//remove the bad doc. now should finally succeed
-assert.writeOK(coll.remove({ x: -5 }));
-var result1 = admin.runCommand( { shardCollection : coll.getFullName(), key : { num : 1 } } );
-printjson( result1 );
-assert.eq( 1, result1.ok , "sharding didn't succeed");
+// remove the bad doc. now should finally succeed
+assert.writeOK(coll.remove({x: -5}));
+var result1 = admin.runCommand({shardCollection: coll.getFullName(), key: {num: 1}});
+printjson(result1);
+assert.eq(1, result1.ok, "sharding didn't succeed");
-//make sure extra index is not created
-assert.eq( 2, coll.getIndexes().length );
+// make sure extra index is not created
+assert.eq(2, coll.getIndexes().length);
// make sure balancing happens
-s.awaitBalance( coll.getName(), db.getName() );
+s.awaitBalance(coll.getName(), db.getName());
// Make sure our initial balance cleanup doesn't interfere with later migrations.
-assert.soon( function(){
- print( "Waiting for migration cleanup to occur..." );
+assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
return coll.count() == coll.find().itcount();
});
s.stopBalancer();
-//test splitting
-var result2 = admin.runCommand( { split : coll.getFullName() , middle : { num : 50 } } );
-printjson( result2 );
-assert.eq( 1, result2.ok , "splitting didn't succeed");
-
-//test moving
-var result3 = admin.runCommand({ movechunk: coll.getFullName(),
- find: { num: 20 },
- to: s.getOther(s.getPrimaryShard("test")).name,
- _waitForDelete: true });
-printjson( result3 );
-assert.eq( 1, result3.ok , "moveChunk didn't succeed");
-
+// test splitting
+var result2 = admin.runCommand({split: coll.getFullName(), middle: {num: 50}});
+printjson(result2);
+assert.eq(1, result2.ok, "splitting didn't succeed");
+
+// test moving
+var result3 = admin.runCommand({
+ movechunk: coll.getFullName(),
+ find: {num: 20},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+printjson(result3);
+assert.eq(1, result3.ok, "moveChunk didn't succeed");
//******************Part 2********************
// Migrations and splits will still work on a sharded collection that only has multi key
// index.
-db.user.ensureIndex({ num: 1, x: 1 });
-db.adminCommand({ shardCollection: 'test.user', key: { num: 1 }});
+db.user.ensureIndex({num: 1, x: 1});
+db.adminCommand({shardCollection: 'test.user', key: {num: 1}});
var indexCount = db.user.getIndexes().length;
-assert.eq(2, indexCount, // indexes for _id_ and num_1_x_1
+assert.eq(2,
+ indexCount, // indexes for _id_ and num_1_x_1
'index count not expected: ' + tojson(db.user.getIndexes()));
var array = [];
@@ -93,37 +101,45 @@ for (var item = 0; item < 50; item++) {
}
for (var docs = 0; docs < 1000; docs++) {
- db.user.insert({ num: docs, x: array });
+ db.user.insert({num: docs, x: array});
}
assert.eq(1000, db.user.find().itcount());
-var result4 = admin.runCommand({ movechunk: 'test.user', find: { num: 70 },
- to: s.getOther(s.getPrimaryShard("test")).name, _waitForDelete: true });
+var result4 = admin.runCommand({
+ movechunk: 'test.user',
+ find: {num: 70},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
assert.commandWorked(result4);
-var expectedShardCount = { shard0000: 0, shard0001: 0 };
-config.chunks.find({ ns: 'test.user' }).forEach(function(chunkDoc) {
- var min = chunkDoc.min.num;
- var max = chunkDoc.max.num;
-
- if (min < 0 || min == MinKey) {
- min = 0;
- }
+var expectedShardCount = {
+ shard0000: 0,
+ shard0001: 0
+};
+config.chunks.find({ns: 'test.user'})
+ .forEach(function(chunkDoc) {
+ var min = chunkDoc.min.num;
+ var max = chunkDoc.max.num;
+
+ if (min < 0 || min == MinKey) {
+ min = 0;
+ }
- if (max > 1000 || max == MaxKey) {
- max = 1000;
- }
+ if (max > 1000 || max == MaxKey) {
+ max = 1000;
+ }
- if (max > 0) {
- expectedShardCount[chunkDoc.shard] += (max - min);
- }
-});
+ if (max > 0) {
+ expectedShardCount[chunkDoc.shard] += (max - min);
+ }
+ });
assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().count());
-result4 = admin.runCommand({ split: 'test.user', middle: { num: 70 }});
+result4 = admin.runCommand({split: 'test.user', middle: {num: 70}});
assert.commandWorked(result4);
assert.eq(expectedShardCount['shard0000'], shard0.getDB('test').user.find().count());
@@ -134,62 +150,59 @@ assert.eq(expectedShardCount['shard0001'], shard1.getDB('test').user.find().coun
// Check chunk boundaries obeyed when using prefix shard key.
// This test repeats with shard key as the prefix of different longer indices.
-for( i=0; i < 3; i++ ){
-
+for (i = 0; i < 3; i++) {
// setup new collection on shard0
var coll2 = db.foo2;
coll2.drop();
- if ( s.getPrimaryShardIdForDatabase( coll2.getDB() ) != shards[0]._id ) {
- var moveRes = admin.runCommand( { movePrimary : coll2.getDB() + "", to : shards[0]._id } );
- assert.eq( moveRes.ok , 1 , "primary not moved correctly" );
+ if (s.getPrimaryShardIdForDatabase(coll2.getDB()) != shards[0]._id) {
+ var moveRes = admin.runCommand({movePrimary: coll2.getDB() + "", to: shards[0]._id});
+ assert.eq(moveRes.ok, 1, "primary not moved correctly");
}
// declare a longer index
- if ( i == 0 ) {
- assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 } ));
- }
- else if ( i == 1 ) {
- assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : -1 } ));
- }
- else if ( i == 2 ) {
- assert.commandWorked( coll2.ensureIndex( { skey : 1, extra : 1 , superfluous : -1 } ));
+ if (i == 0) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1}));
+ } else if (i == 1) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: -1}));
+ } else if (i == 2) {
+ assert.commandWorked(coll2.ensureIndex({skey: 1, extra: 1, superfluous: -1}));
}
// then shard collection on prefix
- var shardRes = admin.runCommand( { shardCollection : coll2 + "", key : { skey : 1 } } );
- assert.eq( shardRes.ok , 1 , "collection not sharded" );
+ var shardRes = admin.runCommand({shardCollection: coll2 + "", key: {skey: 1}});
+ assert.eq(shardRes.ok, 1, "collection not sharded");
// insert docs with same value for skey
bulk = coll2.initializeUnorderedBulkOp();
- for( var i = 0; i < 5; i++ ){
- for( var j = 0; j < 5; j++ ){
- bulk.insert( { skey : 0, extra : i , superfluous : j } );
+ for (var i = 0; i < 5; i++) {
+ for (var j = 0; j < 5; j++) {
+ bulk.insert({skey: 0, extra: i, superfluous: j});
}
}
- assert.writeOK( bulk.execute() );
+ assert.writeOK(bulk.execute());
// split on that key, and check it makes 2 chunks
- var splitRes = admin.runCommand( { split : coll2 + "", middle : { skey : 0 } } );
- assert.eq( splitRes.ok , 1 , "split didn't work" );
- assert.eq( config.chunks.find( { ns : coll2.getFullName() } ).count() , 2 );
+ var splitRes = admin.runCommand({split: coll2 + "", middle: {skey: 0}});
+ assert.eq(splitRes.ok, 1, "split didn't work");
+ assert.eq(config.chunks.find({ns: coll2.getFullName()}).count(), 2);
// movechunk should move ALL docs since they have same value for skey
- moveRes = admin.runCommand({ moveChunk: coll2 + "", find: { skey: 0 },
- to: shards[1]._id, _waitForDelete: true });
- assert.eq( moveRes.ok , 1 , "movechunk didn't work" );
+ moveRes = admin.runCommand(
+ {moveChunk: coll2 + "", find: {skey: 0}, to: shards[1]._id, _waitForDelete: true});
+ assert.eq(moveRes.ok, 1, "movechunk didn't work");
// Make sure our migration eventually goes through before testing individual shards
- assert.soon( function(){
- print( "Waiting for migration cleanup to occur..." );
+ assert.soon(function() {
+ print("Waiting for migration cleanup to occur...");
return coll2.count() == coll2.find().itcount();
});
-
+
// check no orphaned docs on the shards
- assert.eq( 0 , shard0.getCollection( coll2 + "" ).find().itcount() );
- assert.eq( 25 , shard1.getCollection( coll2 + "" ).find().itcount() );
+ assert.eq(0, shard0.getCollection(coll2 + "").find().itcount());
+ assert.eq(25, shard1.getCollection(coll2 + "").find().itcount());
// and check total
- assert.eq( 25 , coll2.find().itcount() , "bad total number of docs after move" );
+ assert.eq(25, coll2.find().itcount(), "bad total number of docs after move");
s.printShardingStatus();
}
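As a reference for the hunk above, here is a minimal sketch of the split / moveChunk / cleanup-wait sequence the prefix-shard-key test exercises. It assumes a running ShardingTest named st and a collection test.sketch already sharded on {skey: 1} with documents on both sides of skey 0; the names are illustrative, not part of the patch.

// Sketch only: split, move, then wait for orphan cleanup on the donor.
var admin = st.s.getDB("admin");
var coll = st.s.getCollection("test.sketch");

// Split at the shard key value, producing two chunks around {skey: 0}.
assert.commandWorked(admin.runCommand({split: coll.getFullName(), middle: {skey: 0}}));

// Move the chunk containing {skey: 0}; _waitForDelete makes the donor finish
// deleting the migrated range before the command returns.
assert.commandWorked(admin.runCommand({
    moveChunk: coll.getFullName(),
    find: {skey: 0},
    to: st.getOther(st.getPrimaryShard("test")).name,
    _waitForDelete: true
}));

// Cleanup can still lag, so poll until the raw count matches a routed query.
assert.soon(function() {
    print("Waiting for migration cleanup to occur...");
    return coll.count() == coll.find().itcount();
});
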
diff --git a/jstests/sharding/presplit.js b/jstests/sharding/presplit.js
index d5efef30c34..b59dc4aa901 100644
--- a/jstests/sharding/presplit.js
+++ b/jstests/sharding/presplit.js
@@ -1,43 +1,40 @@
(function() {
-var s = new ShardingTest({ name: "presplit",
- shards: 2,
- mongos: 1,
- other: { chunkSize : 1 } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-// Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
-bigString = "";
-while ( bigString.length < 10000 ){
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-}
-
-db = s.getDB( "test" );
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 20 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-
-// Make sure that there's only one chunk holding all the data.
-s.printChunks();
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-assert.eq( 0 , s.config.chunks.count() , "single chunk assertion" );
-assert.eq( num , primary.foo.count() );
-
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-
-// Make sure the collection's original chunk got split
-s.printChunks();
-assert.lt( 20 , s.config.chunks.count() , "many chunks assertion" );
-assert.eq( num , primary.foo.count() );
-
-s.printChangeLog();
-s.stop();
+ var s = new ShardingTest({name: "presplit", shards: 2, mongos: 1, other: {chunkSize: 1}});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ // Insert enough data in 'test.foo' to fill several chunks, if it was sharded.
+ bigString = "";
+ while (bigString.length < 10000) {
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ }
+
+ db = s.getDB("test");
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+
+ // Make sure that there's only one chunk holding all the data.
+ s.printChunks();
+ primary = s.getPrimaryShard("test").getDB("test");
+ assert.eq(0, s.config.chunks.count(), "single chunk assertion");
+ assert.eq(num, primary.foo.count());
+
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+
+ // Make sure the collection's original chunk got split
+ s.printChunks();
+ assert.lt(20, s.config.chunks.count(), "many chunks assertion");
+ assert.eq(num, primary.foo.count());
+
+ s.printChangeLog();
+ s.stop();
})();
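presplit.js above depends on the initial chunk of an already-populated collection being split as soon as it is sharded. A rough sketch of that same check, assuming a fresh ShardingTest st started with other: {chunkSize: 1} and no previously sharded collections:

// Sketch: fill an unsharded collection, shard it, then confirm the single
// initial chunk was broken up (chunkSize is 1MB in this scenario).
var db = st.getDB("test");
var big = new Array(10241).join("x");    // ~10KB string
var bulk = db.presplit_sketch.initializeUnorderedBulkOp();
for (var i = 0; i < 2048; i++) {         // ~20MB of data in total
    bulk.insert({_id: i, s: big});
}
assert.writeOK(bulk.execute());

assert.eq(0, st.config.chunks.count());   // nothing sharded yet, so no chunks
st.adminCommand({shardcollection: "test.presplit_sketch", key: {_id: 1}});
assert.lt(20, st.config.chunks.count());  // the original chunk should have been split
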
diff --git a/jstests/sharding/printShardingStatus.js b/jstests/sharding/printShardingStatus.js
index 2bac7da381a..05e6eca0d4f 100644
--- a/jstests/sharding/printShardingStatus.js
+++ b/jstests/sharding/printShardingStatus.js
@@ -2,242 +2,236 @@
// contains important information that it should, like the major section
// headings and the names of sharded collections and their shard keys.
+(function() {
-(function () {
+ var st = new ShardingTest({shards: 1, mongos: 2, config: 1, other: {smallfiles: true}});
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
-var st = new ShardingTest({ shards: 1, mongos: 2, config: 1, other: { smallfiles: true } });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-
-
-function grabStatusOutput(configdb, verbose) {
- var res = print.captureAllOutput( function () {
+ function grabStatusOutput(configdb, verbose) {
+ var res = print.captureAllOutput(function() {
return printShardingStatus(configdb, verbose);
- } );
- var output = res.output.join("\n");
- jsTestLog(output);
- return output;
-}
-
-function assertPresentInOutput(output, content, what) {
- assert(output.includes(content), what + " \"" + content + "\" NOT present in output of "
- + "printShardingStatus() (but it should be)");
-}
-
-function assertNotPresentInOutput(output, content, what) {
- assert( ! output.includes(content), what + " \"" + content + "\" IS present in output of "
- + "printShardingStatus() (but it should not be)");
-}
-
-
-
-////////////////////////
-// Basic tests
-////////////////////////
-
-var dbName = "thisIsTheDatabase";
-var collName = "thisIsTheCollection";
-var shardKeyName = "thisIsTheShardKey";
-var nsName = dbName + "." + collName;
-
-assert.commandWorked( admin.runCommand({ enableSharding: dbName }) );
-var key = {};
-key[shardKeyName] = 1;
-assert.commandWorked( admin.runCommand({ shardCollection: nsName, key: key }) );
-
-
-function testBasic(output) {
- assertPresentInOutput(output, "shards:", "section header");
- assertPresentInOutput(output, "databases:", "section header");
- assertPresentInOutput(output, "balancer:", "section header");
- assertPresentInOutput(output, "active mongoses:", "section header");
- assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
-
- assertPresentInOutput(output, dbName, "database");
- assertPresentInOutput(output, collName, "collection");
- assertPresentInOutput(output, shardKeyName, "shard key");
-}
-
-function testBasicNormalOnly(output) {
- assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
-}
-
-function testBasicVerboseOnly(output) {
- assertPresentInOutput(output, '"mongoVersion" : ' + tojson(version), "active mongos version");
- assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
- assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
-}
-
-var buildinfo = assert.commandWorked( mongos.adminCommand("buildinfo") );
-var serverStatus1 = assert.commandWorked( mongos.adminCommand("serverStatus") );
-var serverStatus2 = assert.commandWorked( st.s1.adminCommand("serverStatus") );
-var version = buildinfo.version;
-var s1Host = serverStatus1.host;
-var s2Host = serverStatus2.host;
-
-
-// Normal, active mongoses
-var outputNormal = grabStatusOutput(st.config, false);
-testBasic(outputNormal);
-testBasicNormalOnly(outputNormal);
-
-var outputVerbose = grabStatusOutput(st.config, true);
-testBasic(outputVerbose);
-testBasicVerboseOnly(outputVerbose);
-
-
-// Take a copy of the config db, in order to test the harder-to-setup cases below.
-// TODO: Replace this manual copy with copydb once SERVER-13080 is fixed.
-var config = mongos.getDB("config");
-var configCopy = mongos.getDB("configCopy");
-config.getCollectionInfos().forEach( function (c) {
- // Create collection with options.
- assert.commandWorked( configCopy.createCollection(c.name, c.options) );
- // Clone the docs.
- config.getCollection(c.name).find().snapshot().forEach( function (d) {
- assert.writeOK( configCopy.getCollection(c.name).insert(d) );
- } );
- // Build the indexes.
- config.getCollection(c.name).getIndexes().forEach( function (i) {
- var key = i.key;
- delete i.key;
- delete i.ns;
- delete i.v;
- assert.commandWorked( configCopy.getCollection(c.name).ensureIndex(key, i) );
- } );
-} );
-
-
-// Inactive mongoses
-// Make the first ping be older than now by 1 second more than the threshold
-// Make the second ping be older still by the same amount again
-var pingAdjustMs = 60000 + 1000;
-var then = new Date();
-then.setTime(then.getTime() - pingAdjustMs);
-configCopy.mongos.update( { _id: s1Host }, { $set: { ping: then } } );
-then.setTime(then.getTime() - pingAdjustMs);
-configCopy.mongos.update( { _id: s2Host }, { $set: { ping: then } } );
-
-var output = grabStatusOutput(configCopy, false);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
-var output = grabStatusOutput(configCopy, true);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
-assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
-
-
-// Older mongoses
-configCopy.mongos.remove( { _id: s1Host } );
-
-var output = grabStatusOutput(configCopy, false);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
-
-var output = grabStatusOutput(configCopy, true);
-assertPresentInOutput(output, "most recently active mongoses:", "section header");
-assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
-assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
-
-
-// No mongoses at all
-configCopy.mongos.remove({});
-
-var output = grabStatusOutput(configCopy, false);
-assertPresentInOutput(output, "most recently active mongoses:\n\tnone", "no mongoses");
-
-var output = grabStatusOutput(configCopy, true);
-assertPresentInOutput(output, "most recently active mongoses:\n\tnone", "no mongoses (verbose)");
-
-
-assert( mongos.getDB(dbName).dropDatabase() );
-
-
-
-////////////////////////
-// Extended tests
-////////////////////////
-
-var testCollDetailsNum = 0;
-function testCollDetails(args) {
- if (args === undefined || typeof(args) != "object") {
- args = {};
+ });
+ var output = res.output.join("\n");
+ jsTestLog(output);
+ return output;
}
- var getCollName = function (x) { return "test.test" + x.zeroPad(4); };
- var collName = getCollName(testCollDetailsNum);
-
- var cmdObj = { shardCollection: collName, key: { _id: 1 } };
- if (args.unique) {
- cmdObj.unique = true;
+ function assertPresentInOutput(output, content, what) {
+ assert(output.includes(content),
+ what + " \"" + content + "\" NOT present in output of " +
+ "printShardingStatus() (but it should be)");
}
- assert.commandWorked( admin.runCommand(cmdObj) );
- if (args.hasOwnProperty("unique")) {
- assert.writeOK( mongos.getDB("config").collections.update({ _id : collName },
- { $set : { "unique" : args.unique } }) );
- }
- if (args.hasOwnProperty("noBalance")) {
- assert.writeOK( mongos.getDB("config").collections.update({ _id : collName },
- { $set : { "noBalance" : args.noBalance } }) );
+ function assertNotPresentInOutput(output, content, what) {
+ assert(!output.includes(content),
+ what + " \"" + content + "\" IS present in output of " +
+ "printShardingStatus() (but it should not be)");
}
- var output = grabStatusOutput(st.config);
-
- assertPresentInOutput(output, collName, "collection");
- // If any of the previous collection names are present, then their optional indicators
- // might also be present. This might taint the results when we go searching through
- // the output.
- // This also means that earlier collNames can't be a prefix of later collNames.
- for (var i = 0; i < testCollDetailsNum; i++) {
- assertNotPresentInOutput(output, getCollName(i), "previous collection");
+ ////////////////////////
+ // Basic tests
+ ////////////////////////
+
+ var dbName = "thisIsTheDatabase";
+ var collName = "thisIsTheCollection";
+ var shardKeyName = "thisIsTheShardKey";
+ var nsName = dbName + "." + collName;
+
+ assert.commandWorked(admin.runCommand({enableSharding: dbName}));
+ var key = {};
+ key[shardKeyName] = 1;
+ assert.commandWorked(admin.runCommand({shardCollection: nsName, key: key}));
+
+ function testBasic(output) {
+ assertPresentInOutput(output, "shards:", "section header");
+ assertPresentInOutput(output, "databases:", "section header");
+ assertPresentInOutput(output, "balancer:", "section header");
+ assertPresentInOutput(output, "active mongoses:", "section header");
+ assertNotPresentInOutput(output, "most recently active mongoses:", "section header");
+
+ assertPresentInOutput(output, dbName, "database");
+ assertPresentInOutput(output, collName, "collection");
+ assertPresentInOutput(output, shardKeyName, "shard key");
}
- assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
- if (args.hasOwnProperty("unique") && typeof(args.unique) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(output, tojson(args.unique), "unique shard key indicator (non bool)");
+ function testBasicNormalOnly(output) {
+ assertPresentInOutput(output, tojson(version) + " : 2\n", "active mongos version");
}
- assertPresentInOutput(output,
- "balancing: " + (!args.noBalance),
- "balancing indicator (inverse of noBalance)");
- if (args.hasOwnProperty("noBalance") && typeof(args.noBalance) != "boolean") {
- // non-bool: actual value must be shown
- assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
+ function testBasicVerboseOnly(output) {
+ assertPresentInOutput(
+ output, '"mongoVersion" : ' + tojson(version), "active mongos version");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "active mongos hostname");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "active mongos hostname");
}
- assert( mongos.getCollection(collName).drop() );
-
- testCollDetailsNum++;
-}
-
-assert.commandWorked( admin.runCommand({ enableSharding: "test" }) );
-
-// Defaults
-testCollDetails({ });
+ var buildinfo = assert.commandWorked(mongos.adminCommand("buildinfo"));
+ var serverStatus1 = assert.commandWorked(mongos.adminCommand("serverStatus"));
+ var serverStatus2 = assert.commandWorked(st.s1.adminCommand("serverStatus"));
+ var version = buildinfo.version;
+ var s1Host = serverStatus1.host;
+ var s2Host = serverStatus2.host;
+
+ // Normal, active mongoses
+ var outputNormal = grabStatusOutput(st.config, false);
+ testBasic(outputNormal);
+ testBasicNormalOnly(outputNormal);
+
+ var outputVerbose = grabStatusOutput(st.config, true);
+ testBasic(outputVerbose);
+ testBasicVerboseOnly(outputVerbose);
+
+ // Take a copy of the config db, in order to test the harder-to-setup cases below.
+ // TODO: Replace this manual copy with copydb once SERVER-13080 is fixed.
+ var config = mongos.getDB("config");
+ var configCopy = mongos.getDB("configCopy");
+ config.getCollectionInfos().forEach(function(c) {
+ // Create collection with options.
+ assert.commandWorked(configCopy.createCollection(c.name, c.options));
+ // Clone the docs.
+ config.getCollection(c.name).find().snapshot().forEach(function(d) {
+ assert.writeOK(configCopy.getCollection(c.name).insert(d));
+ });
+ // Build the indexes.
+ config.getCollection(c.name).getIndexes().forEach(function(i) {
+ var key = i.key;
+ delete i.key;
+ delete i.ns;
+ delete i.v;
+ assert.commandWorked(configCopy.getCollection(c.name).ensureIndex(key, i));
+ });
+ });
+
+ // Inactive mongoses
+ // Make the first ping be older than now by 1 second more than the threshold
+ // Make the second ping be older still by the same amount again
+ var pingAdjustMs = 60000 + 1000;
+ var then = new Date();
+ then.setTime(then.getTime() - pingAdjustMs);
+ configCopy.mongos.update({_id: s1Host}, {$set: {ping: then}});
+ then.setTime(then.getTime() - pingAdjustMs);
+ configCopy.mongos.update({_id: s2Host}, {$set: {ping: then}});
+
+ var output = grabStatusOutput(configCopy, false);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+ var output = grabStatusOutput(configCopy, true);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s1Host), "recent mongos hostname");
+ assertNotPresentInOutput(output, '"_id" : ' + tojson(s2Host), "old mongos hostname");
+
+ // Older mongoses
+ configCopy.mongos.remove({_id: s1Host});
+
+ var output = grabStatusOutput(configCopy, false);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertPresentInOutput(output, tojson(version) + " : 1\n", "recent mongos version");
+
+ var output = grabStatusOutput(configCopy, true);
+ assertPresentInOutput(output, "most recently active mongoses:", "section header");
+ assertNotPresentInOutput(output, '"_id" : ' + tojson(s1Host), "removed mongos hostname");
+ assertPresentInOutput(output, '"_id" : ' + tojson(s2Host), "recent mongos hostname");
+
+ // No mongoses at all
+ configCopy.mongos.remove({});
+
+ var output = grabStatusOutput(configCopy, false);
+ assertPresentInOutput(output, "most recently active mongoses:\n\tnone", "no mongoses");
+
+ var output = grabStatusOutput(configCopy, true);
+ assertPresentInOutput(
+ output, "most recently active mongoses:\n\tnone", "no mongoses (verbose)");
+
+ assert(mongos.getDB(dbName).dropDatabase());
+
+ ////////////////////////
+ // Extended tests
+ ////////////////////////
+
+ var testCollDetailsNum = 0;
+ function testCollDetails(args) {
+ if (args === undefined || typeof(args) != "object") {
+ args = {};
+ }
+
+ var getCollName = function(x) {
+ return "test.test" + x.zeroPad(4);
+ };
+ var collName = getCollName(testCollDetailsNum);
+
+ var cmdObj = {
+ shardCollection: collName,
+ key: {_id: 1}
+ };
+ if (args.unique) {
+ cmdObj.unique = true;
+ }
+ assert.commandWorked(admin.runCommand(cmdObj));
+
+ if (args.hasOwnProperty("unique")) {
+ assert.writeOK(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"unique": args.unique}}));
+ }
+ if (args.hasOwnProperty("noBalance")) {
+ assert.writeOK(mongos.getDB("config").collections.update(
+ {_id: collName}, {$set: {"noBalance": args.noBalance}}));
+ }
+
+ var output = grabStatusOutput(st.config);
+
+ assertPresentInOutput(output, collName, "collection");
+ // If any of the previous collection names are present, then their optional indicators
+ // might also be present. This might taint the results when we go searching through
+ // the output.
+ // This also means that earlier collNames can't be a prefix of later collNames.
+ for (var i = 0; i < testCollDetailsNum; i++) {
+ assertNotPresentInOutput(output, getCollName(i), "previous collection");
+ }
+
+ assertPresentInOutput(output, "unique: " + (!!args.unique), "unique shard key indicator");
+ if (args.hasOwnProperty("unique") && typeof(args.unique) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(
+ output, tojson(args.unique), "unique shard key indicator (non bool)");
+ }
+
+ assertPresentInOutput(output,
+ "balancing: " + (!args.noBalance),
+ "balancing indicator (inverse of noBalance)");
+ if (args.hasOwnProperty("noBalance") && typeof(args.noBalance) != "boolean") {
+ // non-bool: actual value must be shown
+ assertPresentInOutput(output, tojson(args.noBalance), "noBalance indicator (non bool)");
+ }
+
+ assert(mongos.getCollection(collName).drop());
+
+ testCollDetailsNum++;
+ }
-// Expected values
-testCollDetails({ unique: false, noBalance: false });
-testCollDetails({ unique: true, noBalance: true });
+ assert.commandWorked(admin.runCommand({enableSharding: "test"}));
-// Unexpected truthy values
-testCollDetails({ unique: "truthy unique value 1", noBalance: "truthy noBalance value 1" });
-testCollDetails({ unique: 1, noBalance: 1 });
-testCollDetails({ unique: -1, noBalance: -1 });
-testCollDetails({ unique: {}, noBalance: {} });
+ // Defaults
+ testCollDetails({});
-// Unexpected falsy values
-testCollDetails({ unique: "", noBalance: "" });
-testCollDetails({ unique: 0, noBalance: 0 });
+ // Expected values
+ testCollDetails({unique: false, noBalance: false});
+ testCollDetails({unique: true, noBalance: true});
-assert( mongos.getDB("test").dropDatabase() );
+ // Unexpected truthy values
+ testCollDetails({unique: "truthy unique value 1", noBalance: "truthy noBalance value 1"});
+ testCollDetails({unique: 1, noBalance: 1});
+ testCollDetails({unique: -1, noBalance: -1});
+ testCollDetails({unique: {}, noBalance: {}});
+ // Unexpected falsy values
+ testCollDetails({unique: "", noBalance: ""});
+ testCollDetails({unique: 0, noBalance: 0});
+ assert(mongos.getDB("test").dropDatabase());
-st.stop();
+ st.stop();
})();
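The printShardingStatus.js changes above keep the output-scraping pattern intact. A condensed sketch of that pattern, assuming a ShardingTest st is already up:

// Capture everything printShardingStatus() prints and assert on its contents.
var res = print.captureAllOutput(function() {
    return printShardingStatus(st.config, false /* not verbose */);
});
var output = res.output.join("\n");

// The major section headers should always be present.
assert(output.includes("shards:"), "missing shards section");
assert(output.includes("databases:"), "missing databases section");
assert(output.includes("balancer:"), "missing balancer section");
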
diff --git a/jstests/sharding/query_after_multi_write.js b/jstests/sharding/query_after_multi_write.js
index 74867dfd970..a952484435c 100644
--- a/jstests/sharding/query_after_multi_write.js
+++ b/jstests/sharding/query_after_multi_write.js
@@ -1,70 +1,63 @@
(function() {
-"use strict";
-
-/**
- * Test that queries will be properly routed after executing a write that does not
- * perform any shard version checks.
- */
-var runTest = function(writeFunc) {
- var st = new ShardingTest({ shards: 2, mongos: 2 });
-
- var testDB = st.s.getDB('test');
- testDB.dropDatabase();
-
- assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
- st.ensurePrimaryShard('test', 'shard0000');
-
- assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
- assert.commandWorked(testDB.adminCommand({ split: 'test.user', middle: { x: 0 }}));
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- var testDB2 = st.s1.getDB('test');
- testDB2.user.insert({ x: 123456 });
-
- // Move chunk to bump version on a different mongos.
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Issue a query and make sure it gets routed to the right shard.
- assert.neq(null, testDB2.user.findOne({ x: 123456 }));
-
- // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
- // incremented to 3
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0001',
- _waitForDelete: true }));
-
- // Issue a query and make sure it gets routed to the right shard again.
- assert.neq(null, testDB2.user.findOne({ x: 123456 }));
-
- // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
- // incremented to 4
- assert.commandWorked(testDB.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000',
- _waitForDelete: true }));
-
- // Ensure that write commands with multi version do not reset the connection shard version to
- // ignored.
- writeFunc(testDB2);
-
- assert.neq(null, testDB2.user.findOne({ x: 123456 }));
-
- st.stop();
-};
-
-runTest(function(db) {
- db.user.update({}, { $inc: { y: 987654 }}, false, true);
-});
-
-runTest(function(db) {
- db.user.remove({ y: 'noMatch' }, false);
-});
+ "use strict";
+
+ /**
+ * Test that queries will be properly routed after executing a write that does not
+ * perform any shard version checks.
+ */
+ var runTest = function(writeFunc) {
+ var st = new ShardingTest({shards: 2, mongos: 2});
+
+ var testDB = st.s.getDB('test');
+ testDB.dropDatabase();
+
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ assert.commandWorked(testDB.adminCommand({split: 'test.user', middle: {x: 0}}));
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ var testDB2 = st.s1.getDB('test');
+ testDB2.user.insert({x: 123456});
+
+ // Move chunk to bump version on a different mongos.
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+ // Issue a query and make sure it gets routed to the right shard.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
+
+ // At this point, s1 thinks the version of 'test.user' is 2, bounce it again so it gets
+ // incremented to 3
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0001', _waitForDelete: true}));
+
+ // Issue a query and make sure it gets routed to the right shard again.
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
+
+ // At this point, s0 thinks the version of 'test.user' is 3, bounce it again so it gets
+ // incremented to 4
+ assert.commandWorked(testDB.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'shard0000', _waitForDelete: true}));
+
+        // Ensure that write commands with multi version do not reset the connection
+        // shard version to ignored.
+ writeFunc(testDB2);
+
+ assert.neq(null, testDB2.user.findOne({x: 123456}));
+
+ st.stop();
+ };
+
+ runTest(function(db) {
+ db.user.update({}, {$inc: {y: 987654}}, false, true);
+ });
+
+ runTest(function(db) {
+ db.user.remove({y: 'noMatch'}, false);
+ });
})();
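query_after_multi_write.js boils down to bouncing a chunk between shards through one mongos and checking that a second mongos still routes reads correctly after an unversioned multi-write. A compressed sketch of that core loop, assuming the default non-replica-set shard names shard0000/shard0001 used by the test:

// Sketch: move the chunk back and forth via mongos s0, querying via mongos s1
// after each move so s1 is forced to refresh its routing table.
var st = new ShardingTest({shards: 2, mongos: 2});
var testDB = st.s0.getDB('test');
assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
st.ensurePrimaryShard('test', 'shard0000');
assert.commandWorked(testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}));

var otherDB = st.s1.getDB('test');
assert.writeOK(otherDB.user.insert({x: 123456}));

['shard0001', 'shard0000'].forEach(function(target) {
    assert.commandWorked(testDB.adminCommand(
        {moveChunk: 'test.user', find: {x: 123456}, to: target, _waitForDelete: true}));
    // A multi-update skips shard version checks but must not poison routing.
    otherDB.user.update({}, {$inc: {y: 1}}, false, true);
    assert.neq(null, otherDB.user.findOne({x: 123456}));
});
st.stop();
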
diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js
index dea4cf92258..c6b08b8b7c0 100644
--- a/jstests/sharding/query_config.js
+++ b/jstests/sharding/query_config.js
@@ -9,13 +9,14 @@
};
var getListIndexesCursor = function(coll, options, subsequentBatchSize) {
- return new DBCommandCursor(coll.getDB().getMongo(),
- coll.runCommand("listIndexes", options),
- subsequentBatchSize);
+ return new DBCommandCursor(
+ coll.getDB().getMongo(), coll.runCommand("listIndexes", options), subsequentBatchSize);
};
var arrayGetNames = function(array) {
- return array.map(function(spec) { return spec.name; });
+ return array.map(function(spec) {
+ return spec.name;
+ });
};
var cursorGetCollectionNames = function(cursor) {
@@ -23,7 +24,9 @@
};
var sortArrayByName = function(array) {
- return array.sort(function(a, b) { return a.name > b.name; });
+ return array.sort(function(a, b) {
+ return a.name > b.name;
+ });
};
var cursorGetIndexNames = function(cursor) {
@@ -31,13 +34,15 @@
};
var sortArrayById = function(array) {
- return array.sort(function(a, b) { return a._id > b._id; });
+ return array.sort(function(a, b) {
+ return a._id > b._id;
+ });
};
var dropCollectionIfExists = function(coll) {
try {
coll.drop();
- } catch(err) {
+ } catch (err) {
assert.eq(err.code, ErrorCodes.NamespaceNotFound);
}
};
@@ -51,15 +56,17 @@
// testKeys and testCollNames are parallel arrays, testKeys contains the shard key of the
// corresponding collection whose name is in testCollNames.
var testCollNames = ["4a1", "1a12", "3a1b1", "2a1b1c1", "b1", "b1c1", "d1"];
- var testKeys = [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1},
- {d: 1}];
+ var testKeys =
+ [{a: 1}, {a: 1}, {a: 1, b: 1}, {a: 1, b: 1, c: 1}, {b: 1}, {b: 1, c: 1}, {d: 1}];
var testDB = st.s.getDB("test");
assert.commandWorked(st.s.adminCommand({enablesharding: testDB.getName()}));
- var testNamespaces = testCollNames.map(function(e) { return testDB.getName() + "." + e; });
+ var testNamespaces = testCollNames.map(function(e) {
+ return testDB.getName() + "." + e;
+ });
for (var i = 0; i < testKeys.length; i++) {
- assert.commandWorked(st.s.adminCommand({shardcollection: testNamespaces[i],
- key: testKeys[i]}));
+ assert.commandWorked(
+ st.s.adminCommand({shardcollection: testNamespaces[i], key: testKeys[i]}));
}
return testNamespaces;
@@ -72,8 +79,19 @@
var testListConfigCollections = function(st) {
// This test depends on all the collections in the configCollList being in the config
// database.
- var configCollList = ["changelog", "chunks", "collections", "databases", "lockpings",
- "locks", "mongos", "settings", "shards", "tags", "version"];
+ var configCollList = [
+ "changelog",
+ "chunks",
+ "collections",
+ "databases",
+ "lockpings",
+ "locks",
+ "mongos",
+ "settings",
+ "shards",
+ "tags",
+ "version"
+ ];
var configDB = st.s.getDB("config");
var userAddedColl = configDB.userAddedColl;
var cursor;
@@ -134,8 +152,8 @@
// Find query.
cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
- .sort({"_id": 1})
- .batchSize(2);
+ .sort({"_id": 1})
+ .batchSize(2);
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
@@ -146,9 +164,11 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = configDB.collections.aggregate([{$match: {"key.b": 1}},
- {$sort: {"_id": 1}},
- {$project: {"keyb":"$key.b", "keyc":"$key.c"}}],
+ cursor = configDB.collections.aggregate([
+ {$match: {"key.b": 1}},
+ {$sort: {"_id": 1}},
+ {$project: {"keyb": "$key.b", "keyc": "$key.c"}}
+ ],
{cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[3], keyb: 1, keyc: 1});
@@ -175,7 +195,7 @@
var result;
// Get shard names.
- cursor = configDB.shards.find().sort({_id:1});
+ cursor = configDB.shards.find().sort({_id: 1});
var shard1 = cursor.next()._id;
var shard2 = cursor.next()._id;
assert(!cursor.hasNext());
@@ -183,8 +203,8 @@
st.ensurePrimaryShard(testDB.getName(), shard1);
// Setup.
- assert.commandWorked(st.s.adminCommand({shardcollection: testColl.getFullName(),
- key: {e: 1}}));
+ assert.commandWorked(
+ st.s.adminCommand({shardcollection: testColl.getFullName(), key: {e: 1}}));
for (var i = 0; i < testCollData.length; i++) {
assert.writeOK(testColl.insert(testCollData[i]));
}
@@ -192,19 +212,16 @@
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 6}}));
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 8}}));
assert.commandWorked(st.s.adminCommand({split: testColl.getFullName(), middle: {e: 11}}));
- assert.commandWorked(st.s.adminCommand({movechunk: testColl.getFullName(),
- find: {e: 1},
- to: shard2}));
- assert.commandWorked(st.s.adminCommand({movechunk: testColl.getFullName(),
- find: {e: 9},
- to: shard2}));
- assert.commandWorked(st.s.adminCommand({movechunk: testColl.getFullName(),
- find: {e: 12},
- to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 1}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 9}, to: shard2}));
+ assert.commandWorked(
+ st.s.adminCommand({movechunk: testColl.getFullName(), find: {e: 12}, to: shard2}));
// Find query.
cursor = configDB.chunks.find({ns: testColl.getFullName()},
- {_id:0, min:1, max:1, shard:1}).sort({"min.e":1});
+ {_id: 0, min: 1, max: 1, shard: 1}).sort({"min.e": 1});
assert.eq(cursor.next(), {min: {e: {"$minKey": 1}}, "max": {"e": 2}, shard: shard2});
assert.eq(cursor.next(), {min: {e: 2}, max: {e: 6}, shard: shard1});
assert.eq(cursor.next(), {min: {e: 6}, max: {e: 8}, shard: shard1});
@@ -219,13 +236,20 @@
assert.eq(configDB.chunks.distinct("shard").sort(), [shard1, shard2]);
// Group query.
- result = configDB.chunks.group({key: {shard: 1},
- cond: {ns: testColl.getFullName()},
- reduce: function(curr, res) { res.chunks++; },
- initial: {chunks: 0},
- finalize: function(res) { res._id = res.shard; }});
- assert.eq(sortArrayById(result), [{shard: shard1, chunks: 2, _id: shard1},
- {shard: shard2, chunks: 3, _id: shard2}]);
+ result = configDB.chunks.group({
+ key: {shard: 1},
+ cond: {ns: testColl.getFullName()},
+ reduce: function(curr, res) {
+ res.chunks++;
+ },
+ initial: {chunks: 0},
+ finalize: function(res) {
+ res._id = res.shard;
+ }
+ });
+ assert.eq(
+ sortArrayById(result),
+ [{shard: shard1, chunks: 2, _id: shard1}, {shard: shard2, chunks: 3, _id: shard2}]);
// Map reduce query.
var mapFunction = function() {
@@ -234,12 +258,14 @@
}
};
var reduceFunction = function(key, values) {
- return {chunks: values.length};
+ return {
+ chunks: values.length
+ };
};
result = configDB.chunks.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results), [{_id: shard1, value: {chunks: 2}},
- {_id: shard2, value: {chunks: 3}}]);
+ assert.eq(sortArrayById(result.results),
+ [{_id: shard1, value: {chunks: 2}}, {_id: shard2, value: {chunks: 3}}]);
};
/**
@@ -247,13 +273,15 @@
*/
var queryUserCreated = function(database) {
var userColl = database.userColl;
- var userCollData = [{_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
- {_id: 2, g: 1, c: 5, s: "b", u: [1]},
- {_id: 3, g: 2, c: 16, s: "g", u: [3]},
- {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
- {_id: 5, g: 2, c: 18, s: "d", u: [3]},
- {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
- {_id: 7, g: 3, c: 2, s: "f", u: [1]}];
+ var userCollData = [
+ {_id: 1, g: 1, c: 4, s: "c", u: [1, 2]},
+ {_id: 2, g: 1, c: 5, s: "b", u: [1]},
+ {_id: 3, g: 2, c: 16, s: "g", u: [3]},
+ {_id: 4, g: 2, c: 1, s: "a", u: [2, 4]},
+ {_id: 5, g: 2, c: 18, s: "d", u: [3]},
+ {_id: 6, g: 3, c: 11, s: "e", u: [2, 3]},
+ {_id: 7, g: 3, c: 2, s: "f", u: [1]}
+ ];
var userCollIndexes = ["_id_", "s_1"];
var cursor;
var cursorArray;
@@ -294,10 +322,12 @@
assert(!cursor.hasNext());
// Aggregate query.
- cursor = userColl.aggregate([{$match: {c: {$gt: 1}}},
- {$unwind: "$u"},
- {$group: {_id: "$u", sum: {$sum: "$c"}}},
- {$sort: {_id: 1}}],
+ cursor = userColl.aggregate([
+ {$match: {c: {$gt: 1}}},
+ {$unwind: "$u"},
+ {$group: {_id: "$u", sum: {$sum: "$c"}}},
+ {$sort: {_id: 1}}
+ ],
{cursor: {batchSize: 2}});
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: 1, sum: 11});
@@ -317,26 +347,36 @@
assert.eq(userColl.distinct("g").sort(), [1, 2, 3]);
// Group query.
- result = userColl.group({key: {g: 1},
- reduce: function(curr, res) { res.prod *= curr.c; },
- initial: {prod: 1},
- finalize: function(res) { res._id = res.g; }});
- assert.eq(sortArrayById(result), [{g: 1, prod: 20, _id: 1},
- {g: 2, prod: 288, _id: 2},
- {g: 3, prod: 22, _id: 3}]);
+ result = userColl.group({
+ key: {g: 1},
+ reduce: function(curr, res) {
+ res.prod *= curr.c;
+ },
+ initial: {prod: 1},
+ finalize: function(res) {
+ res._id = res.g;
+ }
+ });
+ assert.eq(sortArrayById(result),
+ [{g: 1, prod: 20, _id: 1}, {g: 2, prod: 288, _id: 2}, {g: 3, prod: 22, _id: 3}]);
// Map reduce query.
var mapFunction = function() {
emit(this.g, 1);
};
var reduceFunction = function(key, values) {
- return {count: values.length};
+ return {
+ count: values.length
+ };
};
result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}});
assert.eq(result.ok, 1);
- assert.eq(sortArrayById(result.results), [{_id: 1, value: {count: 2}},
- {_id: 2, value: {count: 3}},
- {_id: 3, value: {count: 2}}]);
+ assert.eq(sortArrayById(result.results),
+ [
+ {_id: 1, value: {count: 2}},
+ {_id: 2, value: {count: 3}},
+ {_id: 3, value: {count: 2}}
+ ]);
assert(userColl.drop());
};
diff --git a/jstests/sharding/query_sharded.js b/jstests/sharding/query_sharded.js
index 2a4089c69cf..7cb698c4477 100644
--- a/jstests/sharding/query_sharded.js
+++ b/jstests/sharding/query_sharded.js
@@ -2,9 +2,7 @@
// Tests mongos-only query behavior
//
-var st = new ShardingTest({shards : 1,
- mongos : 1,
- verbose : 0});
+var st = new ShardingTest({shards: 1, mongos: 1, verbose: 0});
var mongos = st.s0;
var coll = mongos.getCollection("foo.bar");
@@ -13,22 +11,26 @@ var coll = mongos.getCollection("foo.bar");
//
// Ensure we can't use exhaust option through mongos
coll.remove({});
-assert.writeOK(coll.insert({a : 'b'}));
+assert.writeOK(coll.insert({a: 'b'}));
var query = coll.find({});
assert.neq(null, query.next());
query = coll.find({}).addOption(DBQuery.Option.exhaust);
-assert.throws(function(){ query.next(); });
+assert.throws(function() {
+ query.next();
+});
//
//
// Ensure we can't trick mongos by inserting exhaust option on a command through mongos
coll.remove({});
-assert.writeOK(coll.insert({a : 'b'}));
+assert.writeOK(coll.insert({a: 'b'}));
var cmdColl = mongos.getCollection(coll.getDB().toString() + ".$cmd");
-var cmdQuery = cmdColl.find({ ping : 1 }).limit(1);
+var cmdQuery = cmdColl.find({ping: 1}).limit(1);
assert.commandWorked(cmdQuery.next());
-cmdQuery = cmdColl.find({ ping : 1 }).limit(1).addOption(DBQuery.Option.exhaust);
-assert.throws(function(){ cmdQuery.next(); });
+cmdQuery = cmdColl.find({ping: 1}).limit(1).addOption(DBQuery.Option.exhaust);
+assert.throws(function() {
+ cmdQuery.next();
+});
jsTest.log("DONE!");
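The query_sharded.js hunk reduces to one behavioral point: exhaust cursors are rejected when routed through mongos. A stripped-down sketch, assuming a ShardingTest st with a collection foo.bar:

// Sketch: a normal query works through mongos, the exhaust option throws.
var coll = st.s0.getCollection("foo.bar");
assert.writeOK(coll.insert({a: 'b'}));
assert.neq(null, coll.find({}).next());                      // plain query is fine
assert.throws(function() {
    coll.find({}).addOption(DBQuery.Option.exhaust).next();  // exhaust via mongos throws
});
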
diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js
index 442e8cc8ef6..e78e7394a84 100644
--- a/jstests/sharding/read_after_optime.js
+++ b/jstests/sharding/read_after_optime.js
@@ -36,24 +36,17 @@
var pingIntervalSeconds = 10;
var timeoutResult = assert.commandFailedWithCode(
runFindCommand(new Timestamp(lastOp.ts.getTime() + pingIntervalSeconds * 5, 0)),
- ErrorCodes.ExceededTimeLimit
- );
+ ErrorCodes.ExceededTimeLimit);
assert.gt(timeoutResult.waitedMS, 500);
var msg = 'Command on database local timed out waiting for read concern to be satisfied.';
- assert.soon(
- function() {
- var logMessages =
- assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
- for (var i = 0; i < logMessages.length; i++) {
- if (logMessages[i].indexOf(msg) != -1) {
- return true;
- }
+ assert.soon(function() {
+ var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
+ for (var i = 0; i < logMessages.length; i++) {
+ if (logMessages[i].indexOf(msg) != -1) {
+ return true;
}
- return false;
- },
- 'Did not see any log entries containing the following message: ' + msg,
- 60000,
- 300
- );
+ }
+ return false;
+ }, 'Did not see any log entries containing the following message: ' + msg, 60000, 300);
})();
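read_after_optime.js polls the server log through getLog until the expected timeout message shows up. The generic form of that polling idiom, assuming a connection primaryConn and a message string msg:

// Sketch: wait until the node's global log contains a particular message.
assert.soon(function() {
    var logMessages = assert.commandWorked(primaryConn.adminCommand({getLog: 'global'})).log;
    for (var i = 0; i < logMessages.length; i++) {
        if (logMessages[i].indexOf(msg) != -1) {
            return true;
        }
    }
    return false;
}, 'Did not see any log entries containing: ' + msg, 60000, 300);
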
diff --git a/jstests/sharding/read_does_not_create_namespaces.js b/jstests/sharding/read_does_not_create_namespaces.js
index db3c098c0fc..8ee48576ba1 100644
--- a/jstests/sharding/read_does_not_create_namespaces.js
+++ b/jstests/sharding/read_does_not_create_namespaces.js
@@ -2,15 +2,15 @@
// cause entries to be created in the catalog.
(function() {
-var shardingTest = new ShardingTest({ name: 'read_does_not_create_namespaces', shards: 1 });
-var db = shardingTest.getDB('NonExistentDB');
+ var shardingTest = new ShardingTest({name: 'read_does_not_create_namespaces', shards: 1});
+ var db = shardingTest.getDB('NonExistentDB');
-assert.isnull(db.nonExistentColl.findOne({}));
+ assert.isnull(db.nonExistentColl.findOne({}));
-// Neither the database nor the collection should have been created
-assert.isnull(shardingTest.getDB('config').databases.findOne({ _id: 'NonExistentDB' }));
-assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
+ // Neither the database nor the collection should have been created
+ assert.isnull(shardingTest.getDB('config').databases.findOne({_id: 'NonExistentDB'}));
+ assert.eq(-1, shardingTest.shard0.getDBNames().indexOf('NonExistentDB'));
-shardingTest.stop();
+ shardingTest.stop();
})();
diff --git a/jstests/sharding/read_pref.js b/jstests/sharding/read_pref.js
index 7b92eb0d1b4..aadd8903344 100755..100644
--- a/jstests/sharding/read_pref.js
+++ b/jstests/sharding/read_pref.js
@@ -3,19 +3,14 @@
* can be found in dbtests/replica_set_monitor_test.cpp.
*/
-var PRI_TAG = { dc: 'ny' };
-var SEC_TAGS = [
- { dc: 'sf', s: "1" },
- { dc: 'ma', s: "2" },
- { dc: 'eu', s: "3" },
- { dc: 'jp', s: "4" }
-];
+var PRI_TAG = {
+ dc: 'ny'
+};
+var SEC_TAGS = [{dc: 'sf', s: "1"}, {dc: 'ma', s: "2"}, {dc: 'eu', s: "3"}, {dc: 'jp', s: "4"}];
var NODES = SEC_TAGS.length + 1;
var doTest = function(useDollarQuerySyntax) {
- var st = new ShardingTest({ shards: {
- rs0: { nodes: NODES, oplogSize: 10, useHostName: true }
- }});
+ var st = new ShardingTest({shards: {rs0: {nodes: NODES, oplogSize: 10, useHostName: true}}});
var replTest = st.rs0;
var primaryNode = replTest.getPrimary();
@@ -25,25 +20,24 @@ var doTest = function(useDollarQuerySyntax) {
return;
}
- var setupConf = function(){
- var replConf = primaryNode.getDB( 'local' ).system.replset.findOne();
+ var setupConf = function() {
+ var replConf = primaryNode.getDB('local').system.replset.findOne();
replConf.version = (replConf.version || 0) + 1;
var secIdx = 0;
- for ( var x = 0; x < NODES; x++ ){
+ for (var x = 0; x < NODES; x++) {
var node = replConf.members[x];
- if ( node.host == primaryNode.name ){
+ if (node.host == primaryNode.name) {
node.tags = PRI_TAG;
- }
- else {
+ } else {
node.tags = SEC_TAGS[secIdx++];
node.priority = 0;
}
}
try {
- primaryNode.getDB( 'admin' ).runCommand({ replSetReconfig: replConf });
+ primaryNode.getDB('admin').runCommand({replSetReconfig: replConf});
} catch (x) {
jsTest.log('Exception expected because reconfiguring would close all conn, got ' + x);
}
@@ -51,25 +45,25 @@ var doTest = function(useDollarQuerySyntax) {
return replConf;
};
- var checkTag = function( nodeToCheck, tag ){
- for ( var idx = 0; idx < NODES; idx++ ){
+ var checkTag = function(nodeToCheck, tag) {
+ for (var idx = 0; idx < NODES; idx++) {
var node = replConf.members[idx];
- if ( node.host == nodeToCheck ){
- jsTest.log( 'node[' + node.host + '], Tag: ' + tojson( node['tags'] ));
- jsTest.log( 'tagToCheck: ' + tojson( tag ));
+ if (node.host == nodeToCheck) {
+ jsTest.log('node[' + node.host + '], Tag: ' + tojson(node['tags']));
+ jsTest.log('tagToCheck: ' + tojson(tag));
var nodeTag = node['tags'];
- for ( var key in tag ){
- assert.eq( tag[key], nodeTag[key] );
+ for (var key in tag) {
+ assert.eq(tag[key], nodeTag[key]);
}
return;
}
}
- assert( false, 'node ' + nodeToCheck + ' not part of config!' );
+ assert(false, 'node ' + nodeToCheck + ' not part of config!');
};
var replConf = setupConf();
@@ -77,17 +71,16 @@ var doTest = function(useDollarQuerySyntax) {
var conn = st.s;
// Wait until the ReplicaSetMonitor refreshes its view and see the tags
- ReplSetTest.awaitRSClientHosts( conn, primaryNode,
- { ok: true, tags: PRI_TAG }, replTest.name );
+ ReplSetTest.awaitRSClientHosts(conn, primaryNode, {ok: true, tags: PRI_TAG}, replTest.name);
replTest.awaitReplication();
jsTest.log('New rs config: ' + tojson(primaryNode.getDB('local').system.replset.findOne()));
- jsTest.log( 'connpool: ' + tojson(conn.getDB('admin').runCommand({ connPoolStats: 1 })));
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
- var coll = conn.getDB( 'test' ).user;
+ var coll = conn.getDB('test').user;
assert.soon(function() {
- var res = coll.insert({ x: 1 }, { writeConcern: { w: NODES }});
+ var res = coll.insert({x: 1}, {writeConcern: {w: NODES}});
if (!res.hasWriteError()) {
return true;
}
@@ -110,10 +103,10 @@ var doTest = function(useDollarQuerySyntax) {
readPrefObj.tags = readPrefTags;
}
- return coll.find({ $query: {}, $readPreference: readPrefObj,
- $explain: true }).limit(-1).next();
- }
- else {
+ return coll.find({$query: {}, $readPreference: readPrefObj, $explain: true})
+ .limit(-1)
+ .next();
+ } else {
return coll.find().readPref(readPrefMode, readPrefTags).explain("executionStats");
}
};
@@ -127,26 +120,26 @@ var doTest = function(useDollarQuerySyntax) {
// Read pref should work without slaveOk
var explain = getExplain("secondary");
var explainServer = getExplainServer(explain);
- assert.neq( primaryNode.name, explainServer );
+ assert.neq(primaryNode.name, explainServer);
conn.setSlaveOk();
// It should also work with slaveOk
explain = getExplain("secondary");
explainServer = getExplainServer(explain);
- assert.neq( primaryNode.name, explainServer );
+ assert.neq(primaryNode.name, explainServer);
// Check that $readPreference does not influence the actual query
- assert.eq( 1, explain.executionStats.nReturned );
+ assert.eq(1, explain.executionStats.nReturned);
- explain = getExplain("secondaryPreferred", [{ s: "2" }]);
+ explain = getExplain("secondaryPreferred", [{s: "2"}]);
explainServer = getExplainServer(explain);
- checkTag( explainServer, { s: "2" });
- assert.eq( 1, explain.executionStats.nReturned );
+ checkTag(explainServer, {s: "2"});
+ assert.eq(1, explain.executionStats.nReturned);
// Cannot use tags with primaryOnly
- assert.throws( function() {
- getExplain("primary", [{ s: "2" }]);
+ assert.throws(function() {
+ getExplain("primary", [{s: "2"}]);
});
// Ok to use empty tags on primaryOnly
@@ -159,44 +152,43 @@ var doTest = function(useDollarQuerySyntax) {
assert.eq(primaryNode.name, explainServer);
// Check that mongos will try the next tag if nothing matches the first
- explain = getExplain("secondary", [{ z: "3" }, { dc: "jp" }]);
+ explain = getExplain("secondary", [{z: "3"}, {dc: "jp"}]);
explainServer = getExplainServer(explain);
- checkTag( explainServer, { dc: "jp" });
- assert.eq( 1, explain.executionStats.nReturned );
+ checkTag(explainServer, {dc: "jp"});
+ assert.eq(1, explain.executionStats.nReturned);
// Check that mongos will fallback to primary if none of tags given matches
- explain = getExplain("secondaryPreferred", [{ z: "3" }, { dc: "ph" }]);
+ explain = getExplain("secondaryPreferred", [{z: "3"}, {dc: "ph"}]);
explainServer = getExplainServer(explain);
// Call getPrimary again since the primary could have changed after the restart.
assert.eq(replTest.getPrimary().name, explainServer);
- assert.eq( 1, explain.executionStats.nReturned );
+ assert.eq(1, explain.executionStats.nReturned);
// Kill all members except one
var stoppedNodes = [];
- for ( var x = 0; x < NODES - 1; x++ ){
- replTest.stop( x );
- stoppedNodes.push( replTest.nodes[x] );
+ for (var x = 0; x < NODES - 1; x++) {
+ replTest.stop(x);
+ stoppedNodes.push(replTest.nodes[x]);
}
// Wait for ReplicaSetMonitor to realize nodes are down
- ReplSetTest.awaitRSClientHosts( conn, stoppedNodes, { ok: false }, replTest.name );
+ ReplSetTest.awaitRSClientHosts(conn, stoppedNodes, {ok: false}, replTest.name);
// Wait for the last node to be in steady state -> secondary (not recovering)
var lastNode = replTest.nodes[NODES - 1];
- ReplSetTest.awaitRSClientHosts( conn, lastNode,
- { ok: true, secondary: true }, replTest.name );
+ ReplSetTest.awaitRSClientHosts(conn, lastNode, {ok: true, secondary: true}, replTest.name);
- jsTest.log( 'connpool: ' + tojson(conn.getDB('admin').runCommand({ connPoolStats: 1 })));
+ jsTest.log('connpool: ' + tojson(conn.getDB('admin').runCommand({connPoolStats: 1})));
// Test to make sure that connection is ok, in prep for priOnly test
explain = getExplain("nearest");
explainServer = getExplainServer(explain);
- assert.eq( explainServer, replTest.nodes[NODES - 1].name );
- assert.eq( 1, explain.executionStats.nReturned );
+ assert.eq(explainServer, replTest.nodes[NODES - 1].name);
+ assert.eq(1, explain.executionStats.nReturned);
// Should assert if request with priOnly but no primary
- assert.throws( function(){
- getExplain("primary");
+ assert.throws(function() {
+ getExplain("primary");
});
st.stop();
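read_pref.js verifies routing by explaining a query issued with a read preference and checking which server answered (the test wraps that in getExplainServer(), defined outside the hunks shown). A stripped-down sketch of issuing such a read, assuming a mongos connection conn to a replica-set shard whose secondaries carry a {dc: 'jp'} tag:

// Sketch: route an explained read to a tagged secondary; the read preference
// must change only where the query runs, never what it returns.
var coll = conn.getDB('test').user;
var explain = coll.find().readPref('secondary', [{dc: 'jp'}]).explain('executionStats');
assert.eq(1, explain.executionStats.nReturned);
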
diff --git a/jstests/sharding/read_pref_cmd.js b/jstests/sharding/read_pref_cmd.js
index 9df6cc96221..1e4aa48ee25 100644
--- a/jstests/sharding/read_pref_cmd.js
+++ b/jstests/sharding/read_pref_cmd.js
@@ -7,8 +7,8 @@ var NODE_COUNT = 2;
*/
var setUp = function() {
var configDB = st.s.getDB('config');
- configDB.adminCommand({ enableSharding: 'test' });
- configDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
// Each time we drop the 'test' DB we have to re-enable profiling
st.rs0.nodes.forEach(function(node) {
@@ -38,7 +38,7 @@ var tearDown = function() {
*/
var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secExpected) {
var testDB = conn.getDB('test');
- conn.setSlaveOk(false); // purely rely on readPref
+ conn.setSlaveOk(false); // purely rely on readPref
jsTest.log('Testing mode: ' + mode + ', tag sets: ' + tojson(tagSets));
conn.setReadPref(mode, tagSets);
@@ -60,7 +60,9 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
assert(cmdResult.ok);
var testedAtLeastOnce = false;
- var query = { op: 'command' };
+ var query = {
+ op: 'command'
+ };
Object.extend(query, profileQuery);
hostList.forEach(function(node) {
@@ -71,12 +73,11 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
if (secOk && secExpected) {
// The command obeys read prefs and we expect to run
// commands on secondaries with this mode and tag sets
- assert(testDB.adminCommand({ isMaster: 1 }).secondary);
- }
- else {
+ assert(testDB.adminCommand({isMaster: 1}).secondary);
+ } else {
// The command does not obey read prefs, or we expect to run
// commands on primary with this mode or tag sets
- assert(testDB.adminCommand({ isMaster: 1 }).ismaster);
+ assert(testDB.adminCommand({isMaster: 1}).ismaster);
}
testedAtLeastOnce = true;
@@ -100,70 +101,75 @@ var testReadPreference = function(conn, hostList, isMongos, mode, tagSets, secEx
};
// Test command that can be sent to secondary
- cmdTest({ distinct: 'user', key: 'x', query: { x: 1 }}, true,
- formatProfileQuery({ distinct: 'user' }));
+ cmdTest(
+ {distinct: 'user', key: 'x', query: {x: 1}}, true, formatProfileQuery({distinct: 'user'}));
// Test command that can't be sent to secondary
- cmdTest({ create: 'mrIn' }, false, formatProfileQuery({ create: 'mrIn' }));
+ cmdTest({create: 'mrIn'}, false, formatProfileQuery({create: 'mrIn'}));
// Make sure mrIn is propagated to secondaries before proceeding
- testDB.runCommand({ getLastError: 1, w: NODE_COUNT });
+ testDB.runCommand({getLastError: 1, w: NODE_COUNT});
var mapFunc = function(doc) {};
- var reduceFunc = function(key, values) { return values; };
+ var reduceFunc = function(key, values) {
+ return values;
+ };
// Test inline mapReduce on sharded collection.
// Note that in sharded map reduce, it will output the result in a temp collection
// even if out is inline.
if (isMongos) {
- cmdTest({ mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: { inline: 1 }},
- false, formatProfileQuery({ mapreduce: 'user', shardedFirstPass: true }));
+ cmdTest({mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
+ false,
+ formatProfileQuery({mapreduce: 'user', shardedFirstPass: true}));
}
// Test inline mapReduce on unsharded collection.
- cmdTest({ mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: { inline: 1 }}, true,
- formatProfileQuery({ mapreduce: 'mrIn', 'out.inline': 1 }));
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {inline: 1}},
+ true,
+ formatProfileQuery({mapreduce: 'mrIn', 'out.inline': 1}));
// Test non-inline mapReduce on sharded collection.
if (isMongos) {
- cmdTest({ mapreduce: 'user', map: mapFunc, reduce: reduceFunc,
- out: { replace: 'mrOut' }}, false,
- formatProfileQuery({ mapreduce: 'user', shardedFirstPass: true }));
+ cmdTest({mapreduce: 'user', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
+ false,
+ formatProfileQuery({mapreduce: 'user', shardedFirstPass: true}));
}
// Test non-inline mapReduce on unsharded collection.
- cmdTest({ mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: { replace: 'mrOut' }},
- false, formatProfileQuery({ mapreduce: 'mrIn', 'out.replace': 'mrOut' }));
+ cmdTest({mapreduce: 'mrIn', map: mapFunc, reduce: reduceFunc, out: {replace: 'mrOut'}},
+ false,
+ formatProfileQuery({mapreduce: 'mrIn', 'out.replace': 'mrOut'}));
// Test other commands that can be sent to secondary.
- cmdTest({ count: 'user' }, true, formatProfileQuery({ count: 'user' }));
- cmdTest({ group: { key: { x: true }, '$reduce': function(a, b) {}, ns: 'mrIn',
- initial: { x: 0 }}}, true, formatProfileQuery({ 'group.ns': 'mrIn' }));
+ cmdTest({count: 'user'}, true, formatProfileQuery({count: 'user'}));
+ cmdTest({group: {key: {x: true}, '$reduce': function(a, b) {}, ns: 'mrIn', initial: {x: 0}}},
+ true,
+ formatProfileQuery({'group.ns': 'mrIn'}));
- cmdTest({ collStats: 'user' }, true, formatProfileQuery({ count: 'user' }));
- cmdTest({ dbStats: 1 }, true, formatProfileQuery({ dbStats: 1 }));
+ cmdTest({collStats: 'user'}, true, formatProfileQuery({count: 'user'}));
+ cmdTest({dbStats: 1}, true, formatProfileQuery({dbStats: 1}));
- testDB.user.ensureIndex({ loc: '2d' });
- testDB.user.ensureIndex({ position: 'geoHaystack', type:1 }, { bucketSize: 10 });
- testDB.runCommand({ getLastError: 1, w: NODE_COUNT });
- cmdTest({ geoNear: 'user', near: [1, 1] }, true,
- formatProfileQuery({ geoNear: 'user' }));
+ testDB.user.ensureIndex({loc: '2d'});
+ testDB.user.ensureIndex({position: 'geoHaystack', type: 1}, {bucketSize: 10});
+ testDB.runCommand({getLastError: 1, w: NODE_COUNT});
+ cmdTest({geoNear: 'user', near: [1, 1]}, true, formatProfileQuery({geoNear: 'user'}));
// Mongos doesn't implement geoSearch; test it only with ReplicaSetConnection.
if (!isMongos) {
- cmdTest(
- {
- geoSearch: 'user', near: [1, 1],
- search: { type: 'restaurant'}, maxDistance: 10
- }, true, formatProfileQuery({ geoSearch: 'user'}));
+ cmdTest({geoSearch: 'user', near: [1, 1], search: {type: 'restaurant'}, maxDistance: 10},
+ true,
+ formatProfileQuery({geoSearch: 'user'}));
}
// Test on sharded
- cmdTest({ aggregate: 'user', pipeline: [{ $project: { x: 1 }}] }, true,
- formatProfileQuery({ aggregate: 'user' }));
+ cmdTest({aggregate: 'user', pipeline: [{$project: {x: 1}}]},
+ true,
+ formatProfileQuery({aggregate: 'user'}));
// Test on non-sharded
- cmdTest({ aggregate: 'mrIn', pipeline: [{ $project: { x: 1 }}] }, true,
- formatProfileQuery({ aggregate: 'mrIn' }));
+ cmdTest({aggregate: 'mrIn', pipeline: [{$project: {x: 1}}]},
+ true,
+ formatProfileQuery({aggregate: 'mrIn'}));
};
/**
@@ -187,20 +193,20 @@ var testBadMode = function(conn, hostList, isMongos, mode, tagSets) {
// Test that a command that could be routed to a secondary fails with bad mode / tags.
if (isMongos) {
// Command result should have ok: 0.
- cmdResult = testDB.runReadCommand({ distinct: 'user', key: 'x' });
+ cmdResult = testDB.runReadCommand({distinct: 'user', key: 'x'});
jsTest.log('cmd result: ' + tojson(cmdResult));
assert(!cmdResult.ok);
} else {
try {
// conn should throw error
- testDB.runReadCommand({ distinct: 'user', key: 'x' });
+ testDB.runReadCommand({distinct: 'user', key: 'x'});
failureMsg = "Unexpected success running distinct!";
- }
- catch (e) {
+ } catch (e) {
jsTest.log(e);
}
- if (failureMsg) throw failureMsg;
+ if (failureMsg)
+ throw failureMsg;
}
};
@@ -210,28 +216,28 @@ var testAllModes = function(conn, hostList, isMongos) {
// { tag: 'two' } so we can test the interaction of modes and tags. Test
// a bunch of combinations.
[
- // mode, tagSets, expectedHost
- ['primary', undefined, false],
- ['primary', [], false],
+ // mode, tagSets, expectedHost
+ ['primary', undefined, false],
+ ['primary', [], false],
- ['primaryPreferred', undefined, false],
- ['primaryPreferred', [{tag: 'one'}], false],
- // Correctly uses primary and ignores the tag
- ['primaryPreferred', [{tag: 'two'}], false],
+ ['primaryPreferred', undefined, false],
+ ['primaryPreferred', [{tag: 'one'}], false],
+ // Correctly uses primary and ignores the tag
+ ['primaryPreferred', [{tag: 'two'}], false],
- ['secondary', undefined, true],
- ['secondary', [{tag: 'two'}], true],
- ['secondary', [{tag: 'doesntexist'}, {}], true],
- ['secondary', [{tag: 'doesntexist'}, {tag:'two'}], true],
+ ['secondary', undefined, true],
+ ['secondary', [{tag: 'two'}], true],
+ ['secondary', [{tag: 'doesntexist'}, {}], true],
+ ['secondary', [{tag: 'doesntexist'}, {tag: 'two'}], true],
- ['secondaryPreferred', undefined, true],
- ['secondaryPreferred', [{tag: 'one'}], false],
- ['secondaryPreferred', [{tag: 'two'}], true],
+ ['secondaryPreferred', undefined, true],
+ ['secondaryPreferred', [{tag: 'one'}], false],
+ ['secondaryPreferred', [{tag: 'two'}], true],
- // We don't have a way to alter ping times so we can't predict where an
- // untagged 'nearest' command should go, hence only test with tags.
- ['nearest', [{tag: 'one'}], false],
- ['nearest', [{tag: 'two'}], true]
+ // We don't have a way to alter ping times so we can't predict where an
+ // untagged 'nearest' command should go, hence only test with tags.
+ ['nearest', [{tag: 'one'}], false],
+ ['nearest', [{tag: 'two'}], true]
].forEach(function(args) {
var mode = args[0], tagSets = args[1], secExpected = args[2];
@@ -242,17 +248,17 @@ var testAllModes = function(conn, hostList, isMongos) {
});
[
- // Tags not allowed with primary
- ['primary', [{dc: 'doesntexist'}]],
- ['primary', [{dc: 'ny'}]],
- ['primary', [{dc: 'one'}]],
+ // Tags not allowed with primary
+ ['primary', [{dc: 'doesntexist'}]],
+ ['primary', [{dc: 'ny'}]],
+ ['primary', [{dc: 'one'}]],
- // No matching node
- ['secondary', [{tag: 'one'}]],
- ['nearest', [{tag: 'doesntexist'}]],
+ // No matching node
+ ['secondary', [{tag: 'one'}]],
+ ['nearest', [{tag: 'doesntexist'}]],
- ['invalid-mode', undefined],
- ['secondary', ['misformatted-tags']]
+ ['invalid-mode', undefined],
+ ['secondary', ['misformatted-tags']]
].forEach(function(args) {
var mode = args[0], tagSets = args[1];
@@ -263,8 +269,8 @@ var testAllModes = function(conn, hostList, isMongos) {
});
};
-var st = new ShardingTest({shards : {rs0 : {nodes : NODE_COUNT, verbose : 1}},
- other : {mongosOptions : {verbose : 3}}});
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: NODE_COUNT, verbose: 1}}, other: {mongosOptions: {verbose: 3}}});
st.stopBalancer();
ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
@@ -272,8 +278,14 @@ ReplSetTest.awaitRSClientHosts(st.s, st.rs0.nodes);
// Tag primary with { dc: 'ny', tag: 'one' }, secondary with { dc: 'ny', tag: 'two' }
var primary = st.rs0.getPrimary();
var secondary = st.rs0.getSecondary();
-var PRIMARY_TAG = { dc: 'ny', tag: 'one' };
-var SECONDARY_TAG = { dc: 'ny', tag: 'two' };
+var PRIMARY_TAG = {
+ dc: 'ny',
+ tag: 'one'
+};
+var SECONDARY_TAG = {
+ dc: 'ny',
+ tag: 'two'
+};
var rsConfig = primary.getDB("local").system.replset.findOne();
jsTest.log('got rsconf ' + tojson(rsConfig));
@@ -287,13 +299,11 @@ rsConfig.members.forEach(function(member) {
rsConfig.version++;
-
jsTest.log('new rsconf ' + tojson(rsConfig));
try {
- primary.adminCommand({ replSetReconfig: rsConfig });
-}
-catch(e) {
+ primary.adminCommand({replSetReconfig: rsConfig});
+} catch (e) {
jsTest.log('replSetReconfig error: ' + e);
}
@@ -302,10 +312,9 @@ st.rs0.awaitSecondaryNodes();
// Force mongos to reconnect after our reconfig
assert.soon(function() {
try {
- st.s.getDB('foo').runCommand({ create: 'foo' });
+ st.s.getDB('foo').runCommand({create: 'foo'});
return true;
- }
- catch (x) {
+ } catch (x) {
// Intentionally caused an error that forces mongos's monitor to refresh.
jsTest.log('Caught exception while doing dummy command: ' + tojson(x));
return false;
@@ -321,8 +330,8 @@ jsTest.log('got rsconf ' + tojson(rsConfig));
var replConn = new Mongo(st.rs0.getURL());
// Make sure replica set connection is ready
-_awaitRSHostViaRSMonitor(primary.name, { ok: true, tags: PRIMARY_TAG }, st.rs0.name);
-_awaitRSHostViaRSMonitor(secondary.name, { ok: true, tags: SECONDARY_TAG }, st.rs0.name);
+_awaitRSHostViaRSMonitor(primary.name, {ok: true, tags: PRIMARY_TAG}, st.rs0.name);
+_awaitRSHostViaRSMonitor(secondary.name, {ok: true, tags: SECONDARY_TAG}, st.rs0.name);
testAllModes(replConn, st.rs0.nodes, false);
diff --git a/jstests/sharding/read_pref_multi_mongos_stale_config.js b/jstests/sharding/read_pref_multi_mongos_stale_config.js
index 3333e3678ae..42c54f82819 100644
--- a/jstests/sharding/read_pref_multi_mongos_stale_config.js
+++ b/jstests/sharding/read_pref_multi_mongos_stale_config.js
@@ -2,9 +2,11 @@
// Tests that a mongos will correctly retry a stale shard version when read preference is used
//
-var st = new ShardingTest({shards : {rs0 : {quiet : ''}, rs1 : {quiet : ''}},
- mongos : 2,
- other : {mongosOptions : {verbose : 2}}});
+var st = new ShardingTest({
+ shards: {rs0: {quiet: ''}, rs1: {quiet: ''}},
+ mongos: 2,
+ other: {mongosOptions: {verbose: 2}}
+});
var testDB1 = st.s0.getDB('test');
var testDB2 = st.s1.getDB('test');
@@ -12,28 +14,27 @@ var testDB2 = st.s1.getDB('test');
// Trigger a query on mongos 1 so it will have a view of test.user as being unsharded.
testDB1.user.findOne();
-testDB2.adminCommand({ enableSharding: 'test' });
-testDB2.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+testDB2.adminCommand({enableSharding: 'test'});
+testDB2.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-testDB2.adminCommand({ split: 'test.user', middle: { x: 100 }});
+testDB2.adminCommand({split: 'test.user', middle: {x: 100}});
var configDB2 = st.s1.getDB('config');
-var chunkToMove = configDB2.chunks.find().sort({ min: 1 }).next();
-var toShard = configDB2.shards.findOne({ _id: { $ne: chunkToMove.shard }})._id;
-testDB2.adminCommand({ moveChunk: 'test.user', to: toShard, find: { x: 50 }});
+var chunkToMove = configDB2.chunks.find().sort({min: 1}).next();
+var toShard = configDB2.shards.findOne({_id: {$ne: chunkToMove.shard}})._id;
+testDB2.adminCommand({moveChunk: 'test.user', to: toShard, find: {x: 50}});
// Insert a document into each chunk
-assert.writeOK(testDB2.user.insert({ x: 30 }));
-assert.writeOK(testDB2.user.insert({ x: 130 }));
+assert.writeOK(testDB2.user.insert({x: 30}));
+assert.writeOK(testDB2.user.insert({x: 130}));
// The testDB1 mongos does not know the chunk has been moved, and will retry
-var cursor = testDB1.user.find({ x: 30 }).readPref('primary');
+var cursor = testDB1.user.find({x: 30}).readPref('primary');
assert(cursor.hasNext());
assert.eq(30, cursor.next().x);
-cursor = testDB1.user.find({ x: 130 }).readPref('primary');
+cursor = testDB1.user.find({x: 130}).readPref('primary');
assert(cursor.hasNext());
assert.eq(130, cursor.next().x);
st.stop();
-
diff --git a/jstests/sharding/recovering_slaveok.js b/jstests/sharding/recovering_slaveok.js
index 936f8856903..cd66a1b81f5 100644
--- a/jstests/sharding/recovering_slaveok.js
+++ b/jstests/sharding/recovering_slaveok.js
@@ -2,127 +2,123 @@
// RECOVERING state, and don't break
(function() {
-'use strict';
+ 'use strict';
-var shardTest = new ShardingTest({ name: "recovering_slaveok",
- shards: 2,
- mongos: 2,
- other: { rs: true } });
+ var shardTest =
+ new ShardingTest({name: "recovering_slaveok", shards: 2, mongos: 2, other: {rs: true}});
-var mongos = shardTest.s0;
-var mongosSOK = shardTest.s1;
-mongosSOK.setSlaveOk();
+ var mongos = shardTest.s0;
+ var mongosSOK = shardTest.s1;
+ mongosSOK.setSlaveOk();
-var admin = mongos.getDB("admin");
-var config = mongos.getDB("config");
+ var admin = mongos.getDB("admin");
+ var config = mongos.getDB("config");
-var dbase = mongos.getDB("test");
-var coll = dbase.getCollection("foo");
-var dbaseSOk = mongosSOK.getDB( "" + dbase );
-var collSOk = mongosSOK.getCollection( "" + coll );
+ var dbase = mongos.getDB("test");
+ var coll = dbase.getCollection("foo");
+ var dbaseSOk = mongosSOK.getDB("" + dbase);
+ var collSOk = mongosSOK.getCollection("" + coll);
-var rsA = shardTest._rs[0].test;
-var rsB = shardTest._rs[1].test;
+ var rsA = shardTest._rs[0].test;
+ var rsB = shardTest._rs[1].test;
-assert.writeOK(rsA.getPrimary().getDB( "test_a" ).dummy.insert({ x : 1 }));
-assert.writeOK(rsB.getPrimary().getDB( "test_b" ).dummy.insert({ x : 1 }));
+ assert.writeOK(rsA.getPrimary().getDB("test_a").dummy.insert({x: 1}));
+ assert.writeOK(rsB.getPrimary().getDB("test_b").dummy.insert({x: 1}));
-rsA.awaitReplication();
-rsB.awaitReplication();
+ rsA.awaitReplication();
+ rsB.awaitReplication();
-print("1: initial insert");
+ print("1: initial insert");
-coll.save({ _id : -1, a : "a", date : new Date() });
-coll.save({ _id : 1, b : "b", date : new Date() });
+ coll.save({_id: -1, a: "a", date: new Date()});
+ coll.save({_id: 1, b: "b", date: new Date()});
-print("2: shard collection");
+ print("2: shard collection");
-shardTest.shardColl(coll, /* shardBy */ { _id : 1 }, /* splitAt */ { _id : 0 });
+ shardTest.shardColl(coll, /* shardBy */ {_id: 1}, /* splitAt */ {_id: 0});
-print("3: test normal and slaveOk queries");
+ print("3: test normal and slaveOk queries");
-// Make shardA and rsA the same
-var shardA = shardTest.getShard(coll, { _id : -1 });
-var shardAColl = shardA.getCollection( "" + coll );
-var shardB = shardTest.getShard(coll, { _id : 1 });
+ // Make shardA and rsA the same
+ var shardA = shardTest.getShard(coll, {_id: -1});
+ var shardAColl = shardA.getCollection("" + coll);
+ var shardB = shardTest.getShard(coll, {_id: 1});
-if (shardA.name == rsB.getURL()) {
- var swap = rsB;
- rsB = rsA;
- rsA = swap;
-}
+ if (shardA.name == rsB.getURL()) {
+ var swap = rsB;
+ rsB = rsA;
+ rsA = swap;
+ }
-rsA.awaitReplication();
-rsB.awaitReplication();
+ rsA.awaitReplication();
+ rsB.awaitReplication();
-// Because of async migration cleanup, we need to wait for this condition to be true
-assert.soon(function() { return coll.find().itcount() == collSOk.find().itcount(); });
+ // Because of async migration cleanup, we need to wait for this condition to be true
+ assert.soon(function() {
+ return coll.find().itcount() == collSOk.find().itcount();
+ });
-assert.eq(shardAColl.find().itcount(), 1);
-assert.eq(shardAColl.findOne()._id, -1);
+ assert.eq(shardAColl.find().itcount(), 1);
+ assert.eq(shardAColl.findOne()._id, -1);
-print("5: make one of the secondaries RECOVERING");
+ print("5: make one of the secondaries RECOVERING");
-var secs = rsA.getSecondaries();
-var goodSec = secs[0];
-var badSec = secs[1];
+ var secs = rsA.getSecondaries();
+ var goodSec = secs[0];
+ var badSec = secs[1];
-assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
-rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
+ assert.commandWorked(badSec.adminCommand("replSetMaintenance"));
+ rsA.waitForState(badSec, ReplSetTest.State.RECOVERING);
-print("6: stop non-RECOVERING secondary");
+ print("6: stop non-RECOVERING secondary");
-rsA.stop(goodSec);
+ rsA.stop(goodSec);
-print("7: check our regular and slaveOk query");
+ print("7: check our regular and slaveOk query");
-assert.eq(2, coll.find().itcount());
-assert.eq(2, collSOk.find().itcount());
+ assert.eq(2, coll.find().itcount());
+ assert.eq(2, collSOk.find().itcount());
-print("8: restart both our secondaries clean");
+ print("8: restart both our secondaries clean");
-rsA.restart(rsA.getSecondaries(),
- { remember : true, startClean : true },
- undefined,
- 5 * 60 * 1000);
+ rsA.restart(rsA.getSecondaries(), {remember: true, startClean: true}, undefined, 5 * 60 * 1000);
-print("9: wait for recovery");
+ print("9: wait for recovery");
-rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000 );
+ rsA.waitForState(rsA.getSecondaries(), ReplSetTest.State.SECONDARY, 5 * 60 * 1000);
-print("10: check our regular and slaveOk query");
+ print("10: check our regular and slaveOk query");
-// We need to make sure our nodes are considered accessible from mongos - otherwise we fail
-// See SERVER-7274
-ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsA.nodes, { ok : true });
-ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsB.nodes, { ok : true });
+ // We need to make sure our nodes are considered accessible from mongos - otherwise we fail
+ // See SERVER-7274
+ ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsA.nodes, {ok: true});
+ ReplSetTest.awaitRSClientHosts(coll.getMongo(), rsB.nodes, {ok: true});
-// We need to make sure at least one secondary is accessible from mongos - otherwise we fail
-// See SERVER-7699
-ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsA.getSecondaries()[0]],
- { secondary : true, ok : true });
-ReplSetTest.awaitRSClientHosts(collSOk.getMongo(), [rsB.getSecondaries()[0]],
- { secondary : true, ok : true });
+ // We need to make sure at least one secondary is accessible from mongos - otherwise we fail
+ // See SERVER-7699
+ ReplSetTest.awaitRSClientHosts(
+ collSOk.getMongo(), [rsA.getSecondaries()[0]], {secondary: true, ok: true});
+ ReplSetTest.awaitRSClientHosts(
+ collSOk.getMongo(), [rsB.getSecondaries()[0]], {secondary: true, ok: true});
-print("SlaveOK Query...");
-var sOKCount = collSOk.find().itcount();
+ print("SlaveOK Query...");
+ var sOKCount = collSOk.find().itcount();
-var collCount = null;
-try{
- print("Normal query...");
- collCount = coll.find().itcount();
-}
-catch(e){
- printjson(e);
+ var collCount = null;
+ try {
+ print("Normal query...");
+ collCount = coll.find().itcount();
+ } catch (e) {
+ printjson(e);
- // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
- // time can error out.
- print("Error may have been caused by stepdown, try again.");
- collCount = coll.find().itcount();
-}
+ // There may have been a stepdown caused by step 8, so we run this twice in a row. The first
+ // time can error out.
+ print("Error may have been caused by stepdown, try again.");
+ collCount = coll.find().itcount();
+ }
-assert.eq(collCount, sOKCount);
+ assert.eq(collCount, sOKCount);
-shardTest.stop();
+ shardTest.stop();
})();
diff --git a/jstests/sharding/regex_targeting.js b/jstests/sharding/regex_targeting.js
index 5b6f9e02a79..7dd927d8aab 100644
--- a/jstests/sharding/regex_targeting.js
+++ b/jstests/sharding/regex_targeting.js
@@ -2,11 +2,13 @@
// This checks to make sure that sharded regex queries behave the same as unsharded regex queries
//
-var options = { mongosOptions : { binVersion : "" },
- shardOptions : { binVersion : "" },
- configOptions : { binVersion : "" } };
+var options = {
+ mongosOptions: {binVersion: ""},
+ shardOptions: {binVersion: ""},
+ configOptions: {binVersion: ""}
+};
-var st = new ShardingTest({ shards : 2, other : options });
+var st = new ShardingTest({shards: 2, other: options});
st.stopBalancer();
var mongos = st.s0;
@@ -23,149 +25,134 @@ var collCompound = mongos.getCollection("foo.barCompound");
var collNested = mongos.getCollection("foo.barNested");
var collHashed = mongos.getCollection("foo.barHashed");
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB().toString() }));
-admin.runCommand({ movePrimary : coll.getDB().toString(), to : shards[0]._id });
+assert.commandWorked(admin.runCommand({enableSharding: coll.getDB().toString()}));
+admin.runCommand({movePrimary: coll.getDB().toString(), to: shards[0]._id});
//
// Split the collection so that "abcde-0" and "abcde-1" go on different shards when possible
//
-assert.commandWorked(admin.runCommand({ shardCollection : collSharded.toString(),
- key: { a : 1 } }));
-assert.commandWorked(admin.runCommand({ split : collSharded.toString(),
- middle : { a : "abcde-1" } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSharded.toString(),
- find : { a : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-assert.commandWorked(admin.runCommand({ shardCollection : collCompound.toString(),
- key: { a : 1, b : 1 } }));
-assert.commandWorked(admin.runCommand({ split : collCompound.toString(),
- middle : { a : "abcde-1", b : 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collCompound.toString(),
- find : { a : 0, b : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-assert.commandWorked(admin.runCommand({ shardCollection : collNested.toString(),
- key : { 'a.b' : 1 } }));
-assert.commandWorked(admin.runCommand({ split : collNested.toString(),
- middle : { 'a.b' : "abcde-1" } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collNested.toString(),
- find : { a : { b : 0 } },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-assert.commandWorked(admin.runCommand({ shardCollection : collHashed.toString(),
- key: { hash : "hashed" } }));
+assert.commandWorked(admin.runCommand({shardCollection: collSharded.toString(), key: {a: 1}}));
+assert.commandWorked(admin.runCommand({split: collSharded.toString(), middle: {a: "abcde-1"}}));
+assert.commandWorked(admin.runCommand(
+ {moveChunk: collSharded.toString(), find: {a: 0}, to: shards[1]._id, _waitForDelete: true}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collCompound.toString(), key: {a: 1, b: 1}}));
+assert.commandWorked(
+ admin.runCommand({split: collCompound.toString(), middle: {a: "abcde-1", b: 0}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collCompound.toString(),
+ find: {a: 0, b: 0},
+ to: shards[1]._id,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(admin.runCommand({shardCollection: collNested.toString(), key: {'a.b': 1}}));
+assert.commandWorked(admin.runCommand({split: collNested.toString(), middle: {'a.b': "abcde-1"}}));
+assert.commandWorked(admin.runCommand({
+ moveChunk: collNested.toString(),
+ find: {a: {b: 0}},
+ to: shards[1]._id,
+ _waitForDelete: true
+}));
+
+assert.commandWorked(
+ admin.runCommand({shardCollection: collHashed.toString(), key: {hash: "hashed"}}));
st.printShardingStatus();
//
//
// Cannot insert regex _id
-assert.writeError(coll.insert({ _id : /regex value/ }));
-assert.writeError(collSharded.insert({ _id : /regex value/, a : 0 }));
-assert.writeError(collCompound.insert({ _id : /regex value/, a : 0, b : 0 }));
-assert.writeError(collNested.insert({ _id : /regex value/, a : { b : 0 } }));
-assert.writeError(collHashed.insert({ _id : /regex value/, hash : 0 }));
-
+assert.writeError(coll.insert({_id: /regex value/}));
+assert.writeError(collSharded.insert({_id: /regex value/, a: 0}));
+assert.writeError(collCompound.insert({_id: /regex value/, a: 0, b: 0}));
+assert.writeError(collNested.insert({_id: /regex value/, a: {b: 0}}));
+assert.writeError(collHashed.insert({_id: /regex value/, hash: 0}));
//
//
// (For now) we can insert a regex shard key
-assert.writeOK(collSharded.insert({ a : /regex value/ }));
-assert.writeOK(collCompound.insert({ a : /regex value/, b : "other value" }));
-assert.writeOK(collNested.insert({ a : { b : /regex value/ } }));
-assert.writeOK(collHashed.insert({ hash : /regex value/ }));
-
+assert.writeOK(collSharded.insert({a: /regex value/}));
+assert.writeOK(collCompound.insert({a: /regex value/, b: "other value"}));
+assert.writeOK(collNested.insert({a: {b: /regex value/}}));
+assert.writeOK(collHashed.insert({hash: /regex value/}));
//
//
// Query by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({ a : "abcde-0" }));
-assert.writeOK(coll.insert({ a : "abcde-1" }));
-assert.writeOK(coll.insert({ a : /abcde.*/ }));
-assert.eq(coll.find().itcount(), coll.find({ a : /abcde.*/ }).itcount());
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.eq(coll.find().itcount(), coll.find({a: /abcde.*/}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({ a : "abcde-0" }));
-assert.writeOK(collSharded.insert({ a : "abcde-1" }));
-assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
-assert.eq(collSharded.find().itcount(), collSharded.find({ a : /abcde.*/ }).itcount());
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.eq(collSharded.find().itcount(), collSharded.find({a: /abcde.*/}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
-assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
-assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
-assert.eq(collCompound.find().itcount(), collCompound.find({ a : /abcde.*/ }).itcount());
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.eq(collCompound.find().itcount(), collCompound.find({a: /abcde.*/}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.eq(collNested.find().itcount(), collNested.find({ 'a.b' : /abcde.*/ }).itcount());
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(collNested.find().itcount(), collNested.find({'a.b': /abcde.*/}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
-assert.eq(collHashed.find().itcount(), collHashed.find({ hash : /abcde.*/ }).itcount());
-
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.eq(collHashed.find().itcount(), collHashed.find({hash: /abcde.*/}).itcount());
//
//
// Update by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({ a : "abcde-0" }));
-assert.writeOK(coll.insert({ a : "abcde-1" }));
-assert.writeOK(coll.insert({ a : /abcde.*/ }));
-assert.writeOK(coll.update({ a : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(coll.find().itcount(), coll.find({ updated : true }).itcount());
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(coll.find().itcount(), coll.find({updated: true}).itcount());
collSharded.remove({});
-assert.writeOK(collSharded.insert({ a : "abcde-0" }));
-assert.writeOK(collSharded.insert({ a : "abcde-1" }));
-assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
-assert.writeOK(collSharded.update({ a : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collSharded.find().itcount(), collSharded.find({ updated : true }).itcount());
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collSharded.find().itcount(), collSharded.find({updated: true}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
-assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
-assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
-assert.writeOK(collCompound.update({ a : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collCompound.find().itcount(), collCompound.find({ updated : true }).itcount());
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.update({a: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collCompound.find().itcount(), collCompound.find({updated: true}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.writeOK(collNested.update({ 'a.b' : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collNested.find().itcount(), collNested.find({ updated : true }).itcount());
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.update({'a.b': /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collNested.find().itcount(), collNested.find({updated: true}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
-assert.writeOK(collHashed.update({ hash : /abcde.*/ },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(collHashed.find().itcount(), collHashed.find({ updated : true }).itcount());
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.update({hash: /abcde.*/}, {$set: {updated: true}}, {multi: true}));
+assert.eq(collHashed.find().itcount(), collHashed.find({updated: true}).itcount());
//
//
@@ -174,18 +161,19 @@ assert.eq(collHashed.find().itcount(), collHashed.find({ updated : true }).itcou
collSharded.remove({});
collCompound.remove({});
collNested.remove({});
-assert.writeError(collSharded.update({ a : /abcde.*/ }, { $set : { a : /abcde.*/ } },
- { upsert : true }));
-assert.writeError(collCompound.update({ a : /abcde.*/ }, { $set : { a : /abcde.*/, b : 1 } },
- { upsert : true }));
+assert.writeError(collSharded.update({a: /abcde.*/}, {$set: {a: /abcde.*/}}, {upsert: true}));
+assert.writeError(collCompound.update({a: /abcde.*/},
+ {$set: {a: /abcde.*/, b: 1}},
+ {upsert: true}));
// Exact regex in query never equality
-assert.writeError(collNested.update({ 'a.b' : /abcde.*/ }, { $set : { 'a.b' : /abcde.*/ } },
- { upsert : true }));
+assert.writeError(collNested.update({'a.b': /abcde.*/},
+ {$set: {'a.b': /abcde.*/}},
+ {upsert: true}));
// Even nested regexes are not extracted in queries
-assert.writeError(collNested.update({ a : { b : /abcde.*/ } }, { $set : { 'a.b' : /abcde.*/ } },
- { upsert : true }));
-assert.writeError(collNested.update({ c : 1 }, { $set : { 'a.b' : /abcde.*/ } },
- { upsert : true }));
+assert.writeError(collNested.update({a: {b: /abcde.*/}},
+ {$set: {'a.b': /abcde.*/}},
+ {upsert: true}));
+assert.writeError(collNested.update({c: 1}, {$set: {'a.b': /abcde.*/}}, {upsert: true}));
//
//
@@ -194,84 +182,74 @@ assert.writeError(collNested.update({ c : 1 }, { $set : { 'a.b' : /abcde.*/ } },
collSharded.remove({});
collCompound.remove({});
collNested.remove({});
-assert.writeOK(collSharded.update({ a : /abcde.*/ }, { a : /abcde.*/ }, { upsert : true }));
-assert.writeOK(collCompound.update({ a : /abcde.*/ }, { a : /abcde.*/, b : 1 }, { upsert : true }));
-assert.writeOK(collNested.update({ 'a.b' : /abcde.*/ }, { a : { b : /abcde.*/ } },
- { upsert : true }));
-assert.writeOK(collNested.update({ a : { b : /abcde.*/ } }, { a : { b : /abcde.*/ } },
- { upsert : true }));
-assert.writeOK(collNested.update({ c : 1 }, { a : { b : /abcde.*/ } },
- { upsert : true }));
+assert.writeOK(collSharded.update({a: /abcde.*/}, {a: /abcde.*/}, {upsert: true}));
+assert.writeOK(collCompound.update({a: /abcde.*/}, {a: /abcde.*/, b: 1}, {upsert: true}));
+assert.writeOK(collNested.update({'a.b': /abcde.*/}, {a: {b: /abcde.*/}}, {upsert: true}));
+assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {a: {b: /abcde.*/}}, {upsert: true}));
+assert.writeOK(collNested.update({c: 1}, {a: {b: /abcde.*/}}, {upsert: true}));
//
//
// Remove by regex should hit all matching keys, across all shards if applicable
coll.remove({});
-assert.writeOK(coll.insert({ a : "abcde-0" }));
-assert.writeOK(coll.insert({ a : "abcde-1" }));
-assert.writeOK(coll.insert({ a : /abcde.*/ }));
-assert.writeOK(coll.remove({ a : /abcde.*/ }));
+assert.writeOK(coll.insert({a: "abcde-0"}));
+assert.writeOK(coll.insert({a: "abcde-1"}));
+assert.writeOK(coll.insert({a: /abcde.*/}));
+assert.writeOK(coll.remove({a: /abcde.*/}));
assert.eq(0, coll.find({}).itcount());
-
collSharded.remove({});
-assert.writeOK(collSharded.insert({ a : "abcde-0" }));
-assert.writeOK(collSharded.insert({ a : "abcde-1" }));
-assert.writeOK(collSharded.insert({ a : /abcde.*/ }));
-assert.writeOK(collSharded.remove({ a : /abcde.*/ }));
+assert.writeOK(collSharded.insert({a: "abcde-0"}));
+assert.writeOK(collSharded.insert({a: "abcde-1"}));
+assert.writeOK(collSharded.insert({a: /abcde.*/}));
+assert.writeOK(collSharded.remove({a: /abcde.*/}));
assert.eq(0, collSharded.find({}).itcount());
collCompound.remove({});
-assert.writeOK(collCompound.insert({ a : "abcde-0", b : 0 }));
-assert.writeOK(collCompound.insert({ a : "abcde-1", b : 0 }));
-assert.writeOK(collCompound.insert({ a : /abcde.*/, b : 0 }));
-assert.writeOK(collCompound.remove({ a : /abcde.*/ }));
+assert.writeOK(collCompound.insert({a: "abcde-0", b: 0}));
+assert.writeOK(collCompound.insert({a: "abcde-1", b: 0}));
+assert.writeOK(collCompound.insert({a: /abcde.*/, b: 0}));
+assert.writeOK(collCompound.remove({a: /abcde.*/}));
assert.eq(0, collCompound.find({}).itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.writeOK(collNested.remove({ 'a.b' : /abcde.*/ }));
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.writeOK(collNested.remove({'a.b': /abcde.*/}));
assert.eq(0, collNested.find({}).itcount());
collHashed.remove({});
while (st.shard0.getCollection(collHashed.toString()).count() == 0 ||
st.shard1.getCollection(collHashed.toString()).count() == 0) {
- assert.writeOK(collHashed.insert({ hash : "abcde-" + ObjectId().toString() }));
+ assert.writeOK(collHashed.insert({hash: "abcde-" + ObjectId().toString()}));
}
-assert.writeOK(collHashed.insert({ hash : /abcde.*/ }));
-assert.writeOK(collHashed.remove({ hash : /abcde.*/ }));
+assert.writeOK(collHashed.insert({hash: /abcde.*/}));
+assert.writeOK(collHashed.remove({hash: /abcde.*/}));
assert.eq(0, collHashed.find({}).itcount());
-
//
//
// Query/Update/Remove by nested regex is different depending on how the nested regex is specified
coll.remove({});
-assert.writeOK(coll.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(coll.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(coll.insert({ a : { b : /abcde.*/ } }));
-assert.eq(1, coll.find({ a : { b : /abcde.*/ } }).itcount());
-assert.writeOK(coll.update({ a : { b : /abcde.*/ } },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(1, coll.find({ updated : true }).itcount());
-assert.writeOK(coll.remove({ a : { b : /abcde.*/ } }));
+assert.writeOK(coll.insert({a: {b: "abcde-0"}}));
+assert.writeOK(coll.insert({a: {b: "abcde-1"}}));
+assert.writeOK(coll.insert({a: {b: /abcde.*/}}));
+assert.eq(1, coll.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(coll.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, coll.find({updated: true}).itcount());
+assert.writeOK(coll.remove({a: {b: /abcde.*/}}));
assert.eq(2, coll.find().itcount());
collNested.remove({});
-assert.writeOK(collNested.insert({ a : { b : "abcde-0" } }));
-assert.writeOK(collNested.insert({ a : { b : "abcde-1" } }));
-assert.writeOK(collNested.insert({ a : { b : /abcde.*/ } }));
-assert.eq(1, collNested.find({ a : { b : /abcde.*/ } }).itcount());
-assert.writeOK(collNested.update({ a : { b : /abcde.*/ } },
- { $set : { updated : true } },
- { multi : true }));
-assert.eq(1, collNested.find({ updated : true }).itcount());
-assert.writeOK(collNested.remove({ a : { b : /abcde.*/ } }));
+assert.writeOK(collNested.insert({a: {b: "abcde-0"}}));
+assert.writeOK(collNested.insert({a: {b: "abcde-1"}}));
+assert.writeOK(collNested.insert({a: {b: /abcde.*/}}));
+assert.eq(1, collNested.find({a: {b: /abcde.*/}}).itcount());
+assert.writeOK(collNested.update({a: {b: /abcde.*/}}, {$set: {updated: true}}, {multi: true}));
+assert.eq(1, collNested.find({updated: true}).itcount());
+assert.writeOK(collNested.remove({a: {b: /abcde.*/}}));
assert.eq(2, collNested.find().itcount());
jsTest.log("DONE!");
st.stop();
-
diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js
index 22443aae938..8dd315ffa2a 100644
--- a/jstests/sharding/remove1.js
+++ b/jstests/sharding/remove1.js
@@ -1,32 +1,34 @@
(function() {
-var s = new ShardingTest({ name: "remove_shard1", shards: 2 });
+ var s = new ShardingTest({name: "remove_shard1", shards: 2});
-assert.eq( 2, s.config.shards.count() , "initial server count wrong" );
+ assert.eq(2, s.config.shards.count(), "initial server count wrong");
-assert.writeOK(s.config.databases.insert({ _id: 'needToMove',
- partitioned: false,
- primary: 'shard0000'}));
+ assert.writeOK(
+ s.config.databases.insert({_id: 'needToMove', partitioned: false, primary: 'shard0000'}));
-// Returns an error when trying to remove a shard that doesn't exist.
-assert.commandFailed(s.admin.runCommand({ removeshard: "shardz" }));
+ // Returns an error when trying to remove a shard that doesn't exist.
+ assert.commandFailed(s.admin.runCommand({removeshard: "shardz"}));
-// first remove puts in draining mode, the second tells me a db needs to move, the third actually removes
-assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to start draining shard" );
-assert( !s.admin.runCommand( { removeshard: "shard0001" } ).ok , "allowed two draining shards" );
-assert.eq( s.admin.runCommand( { removeshard: "shard0000" } ).dbsToMove, ['needToMove'] , "didn't show db to move" );
-s.getDB('needToMove').dropDatabase();
-assert( s.admin.runCommand( { removeshard: "shard0000" } ).ok , "failed to remove shard" );
-assert.eq( 1, s.config.shards.count() , "removed server still appears in count" );
+ // first remove puts in draining mode, the second tells me a db needs to move, the third
+ // actually removes
+ assert(s.admin.runCommand({removeshard: "shard0000"}).ok, "failed to start draining shard");
+ assert(!s.admin.runCommand({removeshard: "shard0001"}).ok, "allowed two draining shards");
+ assert.eq(s.admin.runCommand({removeshard: "shard0000"}).dbsToMove,
+ ['needToMove'],
+ "didn't show db to move");
+ s.getDB('needToMove').dropDatabase();
+ assert(s.admin.runCommand({removeshard: "shard0000"}).ok, "failed to remove shard");
+ assert.eq(1, s.config.shards.count(), "removed server still appears in count");
-assert( !s.admin.runCommand( { removeshard: "shard0001" } ).ok , "allowed removing last shard" );
+ assert(!s.admin.runCommand({removeshard: "shard0001"}).ok, "allowed removing last shard");
-// should create a shard0002 shard
-var conn = MongoRunner.runMongod({});
-assert( s.admin.runCommand( { addshard: conn.host } ).ok, "failed to add shard" );
-assert.eq( 2, s.config.shards.count(), "new server does not appear in count" );
+ // should create a shard0002 shard
+ var conn = MongoRunner.runMongod({});
+ assert(s.admin.runCommand({addshard: conn.host}).ok, "failed to add shard");
+ assert.eq(2, s.config.shards.count(), "new server does not appear in count");
-MongoRunner.stopMongod(conn);
-s.stop();
+ MongoRunner.stopMongod(conn);
+ s.stop();
})();
diff --git a/jstests/sharding/remove2.js b/jstests/sharding/remove2.js
index 08af78404dd..b8c8d2f1b9e 100644
--- a/jstests/sharding/remove2.js
+++ b/jstests/sharding/remove2.js
@@ -1,153 +1,148 @@
// Test that removing and re-adding shard works correctly.
seedString = function(replTest) {
- members = replTest.getReplSetConfig().members.map(function(elem) { return elem.host; });
+ members = replTest.getReplSetConfig().members.map(function(elem) {
+ return elem.host;
+ });
return replTest.name + '/' + members.join(',');
};
removeShard = function(st, replTest) {
- print( "Removing shard with name: " + replTest.name );
- res = st.admin.runCommand( { removeshard: replTest.name } );
+ print("Removing shard with name: " + replTest.name);
+ res = st.admin.runCommand({removeshard: replTest.name});
printjson(res);
- assert( res.ok , "failed to start draining shard" );
+ assert(res.ok, "failed to start draining shard");
checkRemoveShard = function() {
- res = st.admin.runCommand( { removeshard: replTest.name } );
+ res = st.admin.runCommand({removeshard: replTest.name});
printjson(res);
return res.ok && res.msg == 'removeshard completed successfully';
};
- assert.soon( checkRemoveShard, "failed to remove shard", 5 * 60000 );
+ assert.soon(checkRemoveShard, "failed to remove shard", 5 * 60000);
// Need to wait for migration to be over... only works for inline deletes
checkNSLock = function() {
- printjson( st.s.getDB( "config" ).locks.find().toArray() );
+ printjson(st.s.getDB("config").locks.find().toArray());
return !st.isAnyBalanceInFlight();
};
- assert.soon( checkNSLock, "migrations did not end?" );
-
- sleep( 2000 );
-
- var directdb = replTest.getPrimary().getDB( "admin" );
- assert.soon( function(){
- var res = directdb.currentOp( { desc: /^clean/ } );
- print( "eliot: " + replTest.getPrimary() + "\t" + tojson(res) );
- return res.inprog.length == 0;
- }, "never clean", 5 * 60 * 1000, 1000 );
-
- replTest.getPrimary().getDB( coll.getDB().getName() ).dropDatabase();
- print( "Shard removed successfully" );
+ assert.soon(checkNSLock, "migrations did not end?");
+
+ sleep(2000);
+
+ var directdb = replTest.getPrimary().getDB("admin");
+ assert.soon(function() {
+ var res = directdb.currentOp({desc: /^clean/});
+ print("eliot: " + replTest.getPrimary() + "\t" + tojson(res));
+ return res.inprog.length == 0;
+ }, "never clean", 5 * 60 * 1000, 1000);
+
+ replTest.getPrimary().getDB(coll.getDB().getName()).dropDatabase();
+ print("Shard removed successfully");
};
addShard = function(st, replTest) {
seed = seedString(replTest);
- print( "Adding shard with seed: " + seed );
+ print("Adding shard with seed: " + seed);
try {
- assert.eq(true, st.adminCommand({ addshard : seed }));
+ assert.eq(true, st.adminCommand({addshard: seed}));
} catch (e) {
print("First attempt to addShard failed, trying again");
// transport error on first attempt is expected. Make sure second attempt goes through
- assert.eq(true, st.adminCommand({ addshard : seed }));
+ assert.eq(true, st.adminCommand({addshard: seed}));
}
- ReplSetTest.awaitRSClientHosts( new Mongo( st.s.host ),
- replTest.getSecondaries(),
- {ok : true, secondary : true} );
+ ReplSetTest.awaitRSClientHosts(
+ new Mongo(st.s.host), replTest.getSecondaries(), {ok: true, secondary: true});
- assert.soon( function() {
- var x = st.chunkDiff( coll.getName() , coll.getDB().getName() );
- print( "chunk diff: " + x );
+ assert.soon(function() {
+ var x = st.chunkDiff(coll.getName(), coll.getDB().getName());
+ print("chunk diff: " + x);
return x < 2;
- } , "no balance happened", 30 * 60 * 1000 );
+ }, "no balance happened", 30 * 60 * 1000);
try {
- assert.eq( 300, coll.find().itcount() );
+ assert.eq(300, coll.find().itcount());
} catch (e) {
// Expected. First query might get transport error and need to reconnect.
printjson(e);
- assert.eq( 300, coll.find().itcount() );
+ assert.eq(300, coll.find().itcount());
}
- print( "Shard added successfully" );
+ print("Shard added successfully");
};
-var st = new ShardingTest({ shards: {
- rs0: { nodes: 2 },
- rs1: { nodes: 2 }
- },
- other: {
- chunkSize: 1,
- enableBalancer: true
- }});
+var st = new ShardingTest(
+ {shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, other: {chunkSize: 1, enableBalancer: true}});
// Pending resolution of SERVER-8598, we need to wait for deletion after chunk migrations to avoid
// a pending delete re-creating a database after it was dropped.
-st.s.getDB("config").settings.update( { _id: "balancer" },
- { $set : { _waitForDelete : true } },
- true );
+st.s.getDB("config").settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
var rst0 = st._rs[0].test;
var rst1 = st._rs[1].test;
-var conn = new Mongo( st.s.host );
-var coll = conn.getCollection( "test.remove2" );
+var conn = new Mongo(st.s.host);
+var coll = conn.getCollection("test.remove2");
coll.drop();
-// Decrease how long it will take for rst0 to time out its ReplicaSetMonitor for rst1 when rs1 is shut down
-for( var i = 0; i < rst0.nodes.length; i++ ) {
+// Decrease how long it will take for rst0 to time out its ReplicaSetMonitor for rst1 when rs1 is
+// shut down
+for (var i = 0; i < rst0.nodes.length; i++) {
node = rst0.nodes[i];
- res = node.getDB('admin').runCommand({ setParameter : 1, replMonitorMaxFailedChecks : 1 });
- printjson( res );
- assert( res.ok );
+ res = node.getDB('admin').runCommand({setParameter: 1, replMonitorMaxFailedChecks: 1});
+ printjson(res);
+ assert(res.ok);
}
-st.admin.runCommand({ enableSharding : coll.getDB().getName() });
+st.admin.runCommand({enableSharding: coll.getDB().getName()});
st.ensurePrimaryShard(coll.getDB().getName(), 'test-rs0');
-st.admin.runCommand({ shardCollection : coll.getFullName(), key: { i : 1 }});
+st.admin.runCommand({shardCollection: coll.getFullName(), key: {i: 1}});
// Setup initial data
var str = 'a';
-while( str.length < 1024 * 16 ) {
+while (str.length < 1024 * 16) {
str += str;
}
var bulk = coll.initializeUnorderedBulkOp();
-for( var i = 0; i < 300; i++ ){
- bulk.insert({ i: i % 10, str: str });
+for (var i = 0; i < 300; i++) {
+ bulk.insert({i: i % 10, str: str});
}
assert.writeOK(bulk.execute());
-assert.eq( 300, coll.find().itcount() );
+assert.eq(300, coll.find().itcount());
-assert.soon( function() {
- var x = st.chunkDiff( 'remove2' , "test" ); print( "chunk diff: " + x ); return x < 2;
-} , "no balance happened", 30 * 60 * 1000 );
+assert.soon(function() {
+ var x = st.chunkDiff('remove2', "test");
+ print("chunk diff: " + x);
+ return x < 2;
+}, "no balance happened", 30 * 60 * 1000);
-assert.eq( 300, coll.find().itcount() );
+assert.eq(300, coll.find().itcount());
st.printShardingStatus();
// Remove shard and add it back in, without shutting it down.
-jsTestLog( "Attempting to remove shard and add it back in" );
-removeShard( st, rst1 );
-addShard(st, rst1 );
-
+jsTestLog("Attempting to remove shard and add it back in");
+removeShard(st, rst1);
+addShard(st, rst1);
// Remove shard, restart set, then add it back in.
-jsTestLog( "Attempting to remove shard, restart the set, and then add it back in" );
+jsTestLog("Attempting to remove shard, restart the set, and then add it back in");
originalSeed = seedString(rst1);
-removeShard( st, rst1 );
+removeShard(st, rst1);
rst1.stopSet();
-print( "Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out" );
-sleep( 20000 ); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
+print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
+sleep(20000); // 1 failed check should take 10 seconds, sleep for 20 just to be safe
rst1.startSet();
rst1.initiate();
rst1.awaitReplication();
-assert.eq( originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before" );
-addShard( st, rst1 );
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
-
-// Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up and use it.
+// Shut down shard and wait for its ReplicaSetMonitor to be cleaned up, then start it back up and
+// use it.
// TODO: test this both with AND without waiting for the ReplicaSetMonitor to be cleaned up.
// This part doesn't pass, even without cleaning up the ReplicaSetMonitor - see SERVER-5900.
/*printjson( conn.getDB('admin').runCommand({movePrimary : 'test2', to : rst1.name}) );
@@ -179,41 +174,39 @@ if ( !gle.ok ) {
assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
assert( conn.getDB('test2').dropDatabase().ok );*/
-
// Remove shard and add a new shard with the same replica set and shard name, but different ports.
-jsTestLog( "Attempt removing shard and adding a new shard with the same Replica Set name" );
-removeShard( st, rst1 );
+jsTestLog("Attempt removing shard and adding a new shard with the same Replica Set name");
+removeShard(st, rst1);
rst1.stopSet();
-print( "Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out" );
-sleep( 20000 );
-
+print("Sleeping for 20 seconds to let the other shard's ReplicaSetMonitor time out");
+sleep(20000);
-var rst2 = new ReplSetTest({name : rst1.name, nodes : 2, useHostName : true});
+var rst2 = new ReplSetTest({name: rst1.name, nodes: 2, useHostName: true});
rst2.startSet();
rst2.initiate();
rst2.awaitReplication();
-addShard( st, rst2 );
-printjson( st.admin.runCommand({movePrimary : 'test2', to : rst2.name}) );
+addShard(st, rst2);
+printjson(st.admin.runCommand({movePrimary: 'test2', to: rst2.name}));
-assert.eq( 300, coll.find().itcount() );
-conn.getDB('test2').foo.insert({a:1});
-assert.eq( 1, conn.getDB('test2').foo.find().itcount() );
+assert.eq(300, coll.find().itcount());
+conn.getDB('test2').foo.insert({a: 1});
+assert.eq(1, conn.getDB('test2').foo.find().itcount());
// Can't shut down with rst2 in the set or ShardingTest will fail trying to cleanup on shutdown.
// Have to take out rst2 and put rst1 back into the set so that it can clean up.
-jsTestLog( "Putting ShardingTest back to state it expects" );
-printjson( st.admin.runCommand({movePrimary : 'test2', to : rst0.name}) );
-removeShard( st, rst2 );
+jsTestLog("Putting ShardingTest back to state it expects");
+printjson(st.admin.runCommand({movePrimary: 'test2', to: rst0.name}));
+removeShard(st, rst2);
rst2.stopSet();
rst1.startSet();
rst1.initiate();
rst1.awaitReplication();
-assert.eq( originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before" );
-addShard( st, rst1 );
+assert.eq(originalSeed, seedString(rst1), "Set didn't come back up with the same hosts as before");
+addShard(st, rst1);
-jsTestLog( "finishing!" );
+jsTestLog("finishing!");
// this should be fixed by SERVER-22176
-st.stop({ allowedExitCodes: [ MongoRunner.EXIT_ABRUPT ] });
+st.stop({allowedExitCodes: [MongoRunner.EXIT_ABRUPT]});
diff --git a/jstests/sharding/remove3.js b/jstests/sharding/remove3.js
index 1ca64fc3d10..fdbaeb4d142 100644
--- a/jstests/sharding/remove3.js
+++ b/jstests/sharding/remove3.js
@@ -1,48 +1,44 @@
// Validates the remove/drain shard functionality when there is data on the shard being removed
(function() {
-'use strict';
-
-var st = new ShardingTest({ name: "remove_shard3", shards: 2, mongos: 2 });
-
-assert.commandWorked(st.s0.adminCommand({ enableSharding: 'TestDB' }));
-st.ensurePrimaryShard('TestDB', 'shard0000');
-assert.commandWorked(st.s0.adminCommand({ shardCollection: 'TestDB.Coll', key: { _id: 1 } }));
-assert.commandWorked(st.s0.adminCommand({ split: 'TestDB.Coll', middle: { _id: 0 } }));
-
-// Insert some documents and make sure there are docs on both shards
-st.s0.getDB('TestDB').Coll.insert({ _id: -1, value: 'Negative value' });
-st.s0.getDB('TestDB').Coll.insert({ _id: 1, value: 'Positive value' });
-
-assert.commandWorked(st.s0.adminCommand({ moveChunk: 'TestDB.Coll',
- find: { _id: 1 },
- to: 'shard0001',
- _waitForDelete: true }));
-
-// Make sure both mongos instances know of the latest metadata
-assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
-assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
-
-// Remove shard0001
-var removeRes;
-removeRes = assert.commandWorked(st.s0.adminCommand({ removeShard: 'shard0001' }));
-assert.eq('started', removeRes.state);
-removeRes = assert.commandWorked(st.s0.adminCommand({ removeShard: 'shard0001' }));
-assert.eq('ongoing', removeRes.state);
-
-// Move the one chunk off shard0001
-assert.commandWorked(st.s0.adminCommand({ moveChunk: 'TestDB.Coll',
- find: { _id: 1 },
- to: 'shard0000',
- _waitForDelete: true }));
-
-// Remove shard must succeed now
-removeRes = assert.commandWorked(st.s0.adminCommand({ removeShard: 'shard0001' }));
-assert.eq('completed', removeRes.state);
-
-// Make sure both mongos instance refresh their metadata and do not reference the missing shard
-assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
-assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
-
-st.stop();
-
+ 'use strict';
+
+ var st = new ShardingTest({name: "remove_shard3", shards: 2, mongos: 2});
+
+ assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'}));
+ st.ensurePrimaryShard('TestDB', 'shard0000');
+ assert.commandWorked(st.s0.adminCommand({shardCollection: 'TestDB.Coll', key: {_id: 1}}));
+ assert.commandWorked(st.s0.adminCommand({split: 'TestDB.Coll', middle: {_id: 0}}));
+
+ // Insert some documents and make sure there are docs on both shards
+ st.s0.getDB('TestDB').Coll.insert({_id: -1, value: 'Negative value'});
+ st.s0.getDB('TestDB').Coll.insert({_id: 1, value: 'Positive value'});
+
+ assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: 'shard0001', _waitForDelete: true}));
+
+ // Make sure both mongos instances know of the latest metadata
+ assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+ assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+
+ // Remove shard0001
+ var removeRes;
+ removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'}));
+ assert.eq('started', removeRes.state);
+ removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'}));
+ assert.eq('ongoing', removeRes.state);
+
+ // Move the one chunk off shard0001
+ assert.commandWorked(st.s0.adminCommand(
+ {moveChunk: 'TestDB.Coll', find: {_id: 1}, to: 'shard0000', _waitForDelete: true}));
+
+ // Remove shard must succeed now
+ removeRes = assert.commandWorked(st.s0.adminCommand({removeShard: 'shard0001'}));
+ assert.eq('completed', removeRes.state);
+
+ // Make sure both mongos instances refresh their metadata and do not reference the missing shard
+ assert.eq(2, st.s0.getDB('TestDB').Coll.find({}).toArray().length);
+ assert.eq(2, st.s1.getDB('TestDB').Coll.find({}).toArray().length);
+
+ st.stop();
+
})();
diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js
index 116af4592ae..e8518a1e6bf 100644
--- a/jstests/sharding/rename.js
+++ b/jstests/sharding/rename.js
@@ -1,63 +1,58 @@
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({ name: "rename",
- shards: 2,
- mongos: 1,
- rs: { oplogSize: 10 } });
+ var s = new ShardingTest({name: "rename", shards: 2, mongos: 1, rs: {oplogSize: 10}});
-var db = s.getDB("test");
-var replTest = s.rs0;
+ var db = s.getDB("test");
+ var replTest = s.rs0;
-db.foo.insert({ _id: 1 });
-db.foo.renameCollection('bar');
-assert.isnull(db.getLastError(), '1.0');
-assert.eq(db.bar.findOne(), { _id: 1 }, '1.1');
-assert.eq(db.bar.count(), 1, '1.2');
-assert.eq(db.foo.count(), 0, '1.3');
+ db.foo.insert({_id: 1});
+ db.foo.renameCollection('bar');
+ assert.isnull(db.getLastError(), '1.0');
+ assert.eq(db.bar.findOne(), {_id: 1}, '1.1');
+ assert.eq(db.bar.count(), 1, '1.2');
+ assert.eq(db.foo.count(), 0, '1.3');
-db.foo.insert({ _id: 2 });
-db.foo.renameCollection('bar', true);
-assert.isnull(db.getLastError(), '2.0');
-assert.eq(db.bar.findOne(), { _id: 2 }, '2.1');
-assert.eq(db.bar.count(), 1, '2.2');
-assert.eq(db.foo.count(), 0, '2.3');
+ db.foo.insert({_id: 2});
+ db.foo.renameCollection('bar', true);
+ assert.isnull(db.getLastError(), '2.0');
+ assert.eq(db.bar.findOne(), {_id: 2}, '2.1');
+ assert.eq(db.bar.count(), 1, '2.2');
+ assert.eq(db.foo.count(), 0, '2.3');
-s.adminCommand({ enablesharding: "test" });
-s.getDB('admin').runCommand({ movePrimary: 'test', to: 'rename-rs0' });
+ s.adminCommand({enablesharding: "test"});
+ s.getDB('admin').runCommand({movePrimary: 'test', to: 'rename-rs0'});
-jsTest.log("Testing write concern (1)");
+ jsTest.log("Testing write concern (1)");
-db.foo.insert({ _id: 3 });
-db.foo.renameCollection('bar', true);
+ db.foo.insert({_id: 3});
+ db.foo.renameCollection('bar', true);
-var ans = db.runCommand({ getLastError: 1, w: 3 });
-printjson(ans);
-assert.isnull(ans.err, '3.0');
+ var ans = db.runCommand({getLastError: 1, w: 3});
+ printjson(ans);
+ assert.isnull(ans.err, '3.0');
-assert.eq(db.bar.findOne(), { _id: 3 }, '3.1');
-assert.eq(db.bar.count(), 1, '3.2');
-assert.eq(db.foo.count(), 0, '3.3');
+ assert.eq(db.bar.findOne(), {_id: 3}, '3.1');
+ assert.eq(db.bar.count(), 1, '3.2');
+ assert.eq(db.foo.count(), 0, '3.3');
-// Ensure write concern works by shutting down 1 node in a replica set shard
-jsTest.log("Testing write concern (2)");
+ // Ensure write concern works by shutting down 1 node in a replica set shard
+ jsTest.log("Testing write concern (2)");
-// Kill any node. Don't care if it's a primary or secondary.
-replTest.stop(0);
+ // Kill any node. Don't care if it's a primary or secondary.
+ replTest.stop(0);
-replTest.awaitSecondaryNodes();
-ReplSetTest.awaitRSClientHosts(s.s,
- replTest.getPrimary(),
- { ok: true, ismaster: true },
- replTest.name);
+ replTest.awaitSecondaryNodes();
+ ReplSetTest.awaitRSClientHosts(
+ s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name);
-assert.writeOK(db.foo.insert({ _id: 4 }));
-assert.commandWorked(db.foo.renameCollection('bar', true));
+ assert.writeOK(db.foo.insert({_id: 4}));
+ assert.commandWorked(db.foo.renameCollection('bar', true));
-ans = db.runCommand({ getLastError: 1, w: 3, wtimeout: 5000 });
-assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
+ ans = db.runCommand({getLastError: 1, w: 3, wtimeout: 5000});
+ assert.eq(ans.err, "timeout", 'gle: ' + tojson(ans));
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/rename_across_mongos.js b/jstests/sharding/rename_across_mongos.js
index 5d5dc1fcaf8..e9c435ecff1 100644
--- a/jstests/sharding/rename_across_mongos.js
+++ b/jstests/sharding/rename_across_mongos.js
@@ -1,29 +1,29 @@
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ name: 'rename_across_mongos', shards: 1, mongos: 2 });
-var dbName = 'RenameDB';
+ var st = new ShardingTest({name: 'rename_across_mongos', shards: 1, mongos: 2});
+ var dbName = 'RenameDB';
-st.s0.getDB(dbName).dropDatabase();
-st.s1.getDB(dbName).dropDatabase();
+ st.s0.getDB(dbName).dropDatabase();
+ st.s1.getDB(dbName).dropDatabase();
-// Create collection on first mongos and insert a document
-assert.commandWorked(st.s0.getDB(dbName).runCommand({ create: 'CollNameBeforeRename' }));
-assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({ Key: 1, Value: 1 }));
+ // Create collection on first mongos and insert a document
+ assert.commandWorked(st.s0.getDB(dbName).runCommand({create: 'CollNameBeforeRename'}));
+ assert.writeOK(st.s0.getDB(dbName).CollNameBeforeRename.insert({Key: 1, Value: 1}));
-if (st.configRS) {
- // Ensure that the second mongos will see the newly created database metadata when
- // it tries to do the collection rename.
- st.configRS.awaitLastOpCommitted();
-}
+ if (st.configRS) {
+ // Ensure that the second mongos will see the newly created database metadata when
+ // it tries to do the collection rename.
+ st.configRS.awaitLastOpCommitted();
+ }
-// Rename collection on second mongos and ensure the document is found
-assert.commandWorked(
- st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
-assert.eq([{ Key: 1, Value: 1 }],
- st.s1.getDB(dbName).CollNameAfterRename.find({}, { _id: false }).toArray());
+ // Rename collection on second mongos and ensure the document is found
+ assert.commandWorked(
+ st.s1.getDB(dbName).CollNameBeforeRename.renameCollection('CollNameAfterRename'));
+ assert.eq([{Key: 1, Value: 1}],
+ st.s1.getDB(dbName).CollNameAfterRename.find({}, {_id: false}).toArray());
-st.stop();
+ st.stop();
})();
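
A condensed sketch of the cross-mongos flow exercised above, assuming a running ShardingTest `st` with two mongos and a CSRS config server:

    var db0 = st.s0.getDB('RenameDB');
    var db1 = st.s1.getDB('RenameDB');
    assert.commandWorked(db0.runCommand({create: 'src'}));
    assert.writeOK(db0.src.insert({Key: 1}));
    // Let the second mongos see the new database metadata before it routes the rename.
    st.configRS.awaitLastOpCommitted();
    assert.commandWorked(db1.src.renameCollection('dst'));
    assert.eq(1, db1.dst.count());
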
diff --git a/jstests/sharding/repl_monitor_refresh.js b/jstests/sharding/repl_monitor_refresh.js
index 6767a165d9d..28e86ec8d32 100644
--- a/jstests/sharding/repl_monitor_refresh.js
+++ b/jstests/sharding/repl_monitor_refresh.js
@@ -3,73 +3,71 @@
* become invalid when a replica set reconfig happens.
*/
(function() {
-"use strict";
-
-var NODE_COUNT = 3;
-var st = new ShardingTest({ shards: { rs0: { nodes: NODE_COUNT, oplogSize: 10 }}});
-var replTest = st.rs0;
-var mongos = st.s;
-
-var shardDoc = mongos.getDB('config').shards.findOne();
-assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
-
-/* Make sure that the first node is not the primary (by making the second one primary).
- * We need to do this since the ReplicaSetMonitor iterates over the nodes one
- * by one and you can't remove a node that is currently the primary.
- */
-var connPoolStats = mongos.getDB('admin').runCommand({ connPoolStats: 1 });
-var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
-
-var priConn = replTest.getPrimary();
-var confDoc = priConn.getDB("local").system.replset.findOne();
-
-for (var idx = 0; idx < confDoc.members.length; idx++) {
- if (confDoc.members[idx].host == targetHostName) {
- confDoc.members[idx].priority = 100;
- }
- else {
- confDoc.members[idx].priority = 1;
+ "use strict";
+
+ var NODE_COUNT = 3;
+ var st = new ShardingTest({shards: {rs0: {nodes: NODE_COUNT, oplogSize: 10}}});
+ var replTest = st.rs0;
+ var mongos = st.s;
+
+ var shardDoc = mongos.getDB('config').shards.findOne();
+ assert.eq(NODE_COUNT, shardDoc.host.split(',').length); // seed list should contain all nodes
+
+ /* Make sure that the first node is not the primary (by making the second one primary).
+ * We need to do this since the ReplicaSetMonitor iterates over the nodes one
+ * by one and you can't remove a node that is currently the primary.
+ */
+ var connPoolStats = mongos.getDB('admin').runCommand({connPoolStats: 1});
+ var targetHostName = connPoolStats['replicaSets'][replTest.name].hosts[1].addr;
+
+ var priConn = replTest.getPrimary();
+ var confDoc = priConn.getDB("local").system.replset.findOne();
+
+ for (var idx = 0; idx < confDoc.members.length; idx++) {
+ if (confDoc.members[idx].host == targetHostName) {
+ confDoc.members[idx].priority = 100;
+ } else {
+ confDoc.members[idx].priority = 1;
+ }
}
-}
-confDoc.version++;
+ confDoc.version++;
-jsTest.log('Changing conf to ' + tojson(confDoc));
+ jsTest.log('Changing conf to ' + tojson(confDoc));
-try {
- priConn.getDB('admin').adminCommand({ replSetReconfig: confDoc });
-} catch (x) {
- print('Expected exception because of reconfig' + x);
-}
+ try {
+ priConn.getDB('admin').adminCommand({replSetReconfig: confDoc});
+ } catch (x) {
+ print('Expected exception because of reconfig: ' + x);
+ }
-ReplSetTest.awaitRSClientHosts(mongos, { host: targetHostName },
- { ok: true, ismaster: true });
+ ReplSetTest.awaitRSClientHosts(mongos, {host: targetHostName}, {ok: true, ismaster: true});
-// Remove first node from set
-confDoc.members.shift();
-confDoc.version++;
+ // Remove first node from set
+ confDoc.members.shift();
+ confDoc.version++;
-try {
- replTest.getPrimary().getDB('admin').adminCommand({ replSetReconfig: confDoc });
-} catch (x) {
- print('Expected exception because of reconfig: ' + x);
-}
+ try {
+ replTest.getPrimary().getDB('admin').adminCommand({replSetReconfig: confDoc});
+ } catch (x) {
+ print('Expected exception because of reconfig: ' + x);
+ }
-assert.soon(function() {
- var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
- var replView = connPoolStats.replicaSets[replTest.name].hosts;
- jsTest.log('current replView: ' + tojson(replView));
+ assert.soon(function() {
+ var connPoolStats = mongos.getDB('admin').runCommand('connPoolStats');
+ var replView = connPoolStats.replicaSets[replTest.name].hosts;
+ jsTest.log('current replView: ' + tojson(replView));
- return replView.length == NODE_COUNT - 1;
-});
+ return replView.length == NODE_COUNT - 1;
+ });
-assert.soon(function() {
- shardDoc = mongos.getDB('config').shards.findOne();
- jsTest.log('shardDoc: ' + tojson(shardDoc));
- // seed list should contain one less node
- return shardDoc.host.split(',').length == NODE_COUNT - 1;
-});
+ assert.soon(function() {
+ shardDoc = mongos.getDB('config').shards.findOne();
+ jsTest.log('shardDoc: ' + tojson(shardDoc));
+ // seed list should contain one less node
+ return shardDoc.host.split(',').length == NODE_COUNT - 1;
+ });
-st.stop();
+ st.stop();
}()); \ No newline at end of file
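
The reconfig idiom above (read the current document from local.system.replset, bump the version, expect the connection to drop) is reusable on its own. A hedged sketch, assuming `replTest` is a running ReplSetTest with at least two nodes:

    var conf = replTest.getPrimary().getDB('local').system.replset.findOne();
    conf.members[1].priority = 100;  // make the second member the preferred primary
    conf.version++;
    try {
        replTest.getPrimary().getDB('admin').runCommand({replSetReconfig: conf});
    } catch (e) {
        // The reconfig closes connections, so an exception here is expected.
        print('reconfig exception (expected): ' + e);
    }
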
diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js
index 43602ae26ed..1d52ac47abc 100644
--- a/jstests/sharding/replmonitor_bad_seed.js
+++ b/jstests/sharding/replmonitor_bad_seed.js
@@ -13,16 +13,18 @@
* was able to refresh before proceeding to check.
*/
-var rsOpt = { oplogSize: 10 };
-var st = new ShardingTest({ shards: 1, rs: rsOpt });
+var rsOpt = {
+ oplogSize: 10
+};
+var st = new ShardingTest({shards: 1, rs: rsOpt});
var mongos = st.s;
var replTest = st.rs0;
var adminDB = mongos.getDB('admin');
-//adminDB.runCommand({ addShard: replTest.getURL() });
+// adminDB.runCommand({ addShard: replTest.getURL() });
-adminDB.runCommand({ enableSharding: 'test' });
-adminDB.runCommand({ shardCollection: 'test.user', key: { x: 1 }});
+adminDB.runCommand({enableSharding: 'test'});
+adminDB.runCommand({shardCollection: 'test.user', key: {x: 1}});
/* The cluster now has the shard information. Then kill the replica set so
* when mongos restarts and tries to create a ReplSetMonitor for that shard,
@@ -30,13 +32,13 @@ adminDB.runCommand({ shardCollection: 'test.user', key: { x: 1 }});
*/
replTest.stopSet();
st.restartMongos(0);
-mongos = st.s; // refresh mongos with the new one
+mongos = st.s; // refresh mongos with the new one
var coll = mongos.getDB('test').user;
var verifyInsert = function() {
var beforeCount = coll.find().count();
- coll.insert({ x: 1 });
+ coll.insert({x: 1});
var afterCount = coll.find().count();
assert.eq(beforeCount + 1, afterCount);
@@ -45,15 +47,14 @@ var verifyInsert = function() {
jsTest.log('Insert to a downed replSet');
assert.throws(verifyInsert);
-replTest.startSet({ oplogSize: 10 });
+replTest.startSet({oplogSize: 10});
replTest.initiate();
replTest.awaitSecondaryNodes();
jsTest.log('Insert to an online replSet');
// Verify that the replSetMonitor can reach the restarted set.
-ReplSetTest.awaitRSClientHosts(mongos, replTest.nodes, { ok: true });
+ReplSetTest.awaitRSClientHosts(mongos, replTest.nodes, {ok: true});
verifyInsert();
st.stop();
-
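
ReplSetTest.awaitRSClientHosts does the waiting in this test: it polls the mongos until its ReplicaSetMonitor reports the given hosts in the requested state. Typical usage, assuming `mongos` and `replTest` from a ShardingTest:

    // Wait until every node of the set is seen as healthy by the mongos.
    ReplSetTest.awaitRSClientHosts(mongos, replTest.nodes, {ok: true});
    // Or wait until a specific node is recognized as the primary.
    ReplSetTest.awaitRSClientHosts(mongos, replTest.getPrimary(), {ok: true, ismaster: true});
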
diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js
index d2519f0ae5e..a8eca975283 100644
--- a/jstests/sharding/return_partial_shards_down.js
+++ b/jstests/sharding/return_partial_shards_down.js
@@ -2,9 +2,7 @@
// Tests that zero results are correctly returned with returnPartial and shards down
//
-var st = new ShardingTest({shards : 3,
- mongos : 1,
- other : {mongosOptions : {verbose : 2}}});
+var st = new ShardingTest({shards: 3, mongos: 1, other: {mongosOptions: {verbose: 2}}});
// Stop balancer, we're doing our own manual chunk distribution
st.stopBalancer();
@@ -14,42 +12,31 @@ var config = mongos.getDB("config");
var admin = mongos.getDB("admin");
var shards = config.shards.find().toArray();
-for ( var i = 0; i < shards.length; i++) {
+for (var i = 0; i < shards.length; i++) {
shards[i].conn = new Mongo(shards[i].host);
}
var collOneShard = mongos.getCollection("foo.collOneShard");
var collAllShards = mongos.getCollection("foo.collAllShards");
-printjson(admin.runCommand({enableSharding : collOneShard.getDB() + ""}));
-printjson(admin.runCommand({movePrimary : collOneShard.getDB() + "",
- to : shards[0]._id}));
+printjson(admin.runCommand({enableSharding: collOneShard.getDB() + ""}));
+printjson(admin.runCommand({movePrimary: collOneShard.getDB() + "", to: shards[0]._id}));
-printjson(admin.runCommand({shardCollection : collOneShard + "",
- key : {_id : 1}}));
-printjson(admin.runCommand({shardCollection : collAllShards + "",
- key : {_id : 1}}));
+printjson(admin.runCommand({shardCollection: collOneShard + "", key: {_id: 1}}));
+printjson(admin.runCommand({shardCollection: collAllShards + "", key: {_id: 1}}));
// Split and move the "both shard" collection to both shards
-printjson(admin.runCommand({split : collAllShards + "",
- middle : {_id : 0}}));
-printjson(admin.runCommand({split : collAllShards + "",
- middle : {_id : 1000}}));
-printjson(admin.runCommand({moveChunk : collAllShards + "",
- find : {_id : 0},
- to : shards[1]._id}));
-printjson(admin.runCommand({moveChunk : collAllShards + "",
- find : {_id : 1000},
- to : shards[2]._id}));
+printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 0}}));
+printjson(admin.runCommand({split: collAllShards + "", middle: {_id: 1000}}));
+printjson(admin.runCommand({moveChunk: collAllShards + "", find: {_id: 0}, to: shards[1]._id}));
+printjson(admin.runCommand({moveChunk: collAllShards + "", find: {_id: 1000}, to: shards[2]._id}));
// Collections are now distributed correctly
jsTest.log("Collections now distributed correctly.");
st.printShardingStatus();
-var inserts = [{_id : -1},
- {_id : 1},
- {_id : 1000}];
+var inserts = [{_id: -1}, {_id: 1}, {_id: 1000}];
collOneShard.insert(inserts);
assert.writeOK(collAllShards.insert(inserts));
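
The setup above is the usual split-then-move recipe for spreading one collection across every shard. A compact sketch, reusing the `admin` and `shards` handles from the test:

    var ns = 'foo.collAllShards';
    assert.commandWorked(admin.runCommand({enableSharding: 'foo'}));
    assert.commandWorked(admin.runCommand({shardCollection: ns, key: {_id: 1}}));
    // Carve out three ranges and park one on each shard.
    assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 0}}));
    assert.commandWorked(admin.runCommand({split: ns, middle: {_id: 1000}}));
    assert.commandWorked(admin.runCommand({moveChunk: ns, find: {_id: 0}, to: shards[1]._id}));
    assert.commandWorked(admin.runCommand({moveChunk: ns, find: {_id: 1000}, to: shards[2]._id}));
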
diff --git a/jstests/sharding/rs_stepdown_and_pooling.js b/jstests/sharding/rs_stepdown_and_pooling.js
index 3cc97bfe147..928bd515635 100644
--- a/jstests/sharding/rs_stepdown_and_pooling.js
+++ b/jstests/sharding/rs_stepdown_and_pooling.js
@@ -2,115 +2,110 @@
// Tests what happens when a replica set primary goes down with pooled connections.
//
(function() {
-"use strict";
-
-var st = new ShardingTest({shards : {rs0 : {nodes : 2}}, mongos : 1});
-
-// Stop balancer to eliminate weird conn stuff
-st.stopBalancer();
-
-var mongos = st.s0;
-var coll = mongos.getCollection("foo.bar");
-var db = coll.getDB();
-
-//Test is not valid for Win32
-var is32Bits = ( db.serverBuildInfo().bits == 32 );
-if ( is32Bits && _isWindows() ) {
-
- // Win32 doesn't provide the polling interface we need to implement the check tested here
- jsTest.log( "Test is not valid on Win32 platform." );
-
-}
-else {
-
- // Non-Win32 platform
-
- var primary = st.rs0.getPrimary();
- var secondary = st.rs0.getSecondary();
-
- jsTest.log("Creating new connections...");
-
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for ( var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- conns[i].getCollection(coll + "").findOne();
- }
-
- jsTest.log("Returning the connections back to the pool.");
-
- for ( var i = 0; i < conns.length; i++ ) {
- conns[i] = null;
- }
- // Make sure we return connections back to the pool
- gc();
-
- // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
- // something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({ shardConnPoolStats : 1 });
- printjson( connPoolStats );
-
- jsTest.log("Stepdown primary and then step back up...");
-
- var stepDown = function(node, timeSecs) {
- var result = null;
- try {
- result = node.getDB("admin").runCommand({ replSetStepDown : timeSecs, force : true });
- // Should not get here
- } catch (e) {
- printjson(e);
+ "use strict";
+
+ var st = new ShardingTest({shards: {rs0: {nodes: 2}}, mongos: 1});
+
+ // Stop balancer to eliminate weird conn stuff
+ st.stopBalancer();
+
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var db = coll.getDB();
+
+ // Test is not valid for Win32
+ var is32Bits = (db.serverBuildInfo().bits == 32);
+ if (is32Bits && _isWindows()) {
+ // Win32 doesn't provide the polling interface we need to implement the check tested here
+ jsTest.log("Test is not valid on Win32 platform.");
+
+ } else {
+ // Non-Win32 platform
+
+ var primary = st.rs0.getPrimary();
+ var secondary = st.rs0.getSecondary();
+
+ jsTest.log("Creating new connections...");
+
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x10)-> mongos ->(x10)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ conns[i].getCollection(coll + "").findOne();
}
-
- if (result != null) printjson(result);
- assert.eq(null, result);
- };
-
- stepDown(primary, 0);
-
- jsTest.log("Waiting for mongos to acknowledge stepdown...");
-
- ReplSetTest.awaitRSClientHosts( mongos,
- secondary,
- { ismaster : true },
- st.rs0,
- 2 * 60 * 1000 ); // slow hosts can take longer to recognize sd
-
- jsTest.log("Stepping back up...");
-
- stepDown(secondary, 10000);
-
- jsTest.log("Waiting for mongos to acknowledge step up...");
-
- ReplSetTest.awaitRSClientHosts( mongos,
- primary,
- { ismaster : true },
- st.rs0,
- 2 * 60 * 1000 );
-
- jsTest.log("Waiting for socket timeout time...");
-
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
-
- jsTest.log("Run queries using new connections.");
-
- var numErrors = 0;
- for ( var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- printjson(newConn.getCollection("foo.bar").findOne());
- } catch (e) {
- printjson(e);
- numErrors++;
+
+ jsTest.log("Returning the connections back to the pool.");
+
+ for (var i = 0; i < conns.length; i++) {
+ conns[i] = null;
}
- }
-
- assert.eq(0, numErrors);
+ // Make sure we return connections back to the pool
+ gc();
+
+ // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
+ // something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
+
+ jsTest.log("Stepdown primary and then step back up...");
+
+ var stepDown = function(node, timeSecs) {
+ var result = null;
+ try {
+ result = node.getDB("admin").runCommand({replSetStepDown: timeSecs, force: true});
+ // Should not get here
+ } catch (e) {
+ printjson(e);
+ }
+
+ if (result != null)
+ printjson(result);
+ assert.eq(null, result);
+ };
+
+ stepDown(primary, 0);
+
+ jsTest.log("Waiting for mongos to acknowledge stepdown...");
+
+ ReplSetTest.awaitRSClientHosts(
+ mongos,
+ secondary,
+ {ismaster: true},
+ st.rs0,
+ 2 * 60 * 1000); // slow hosts can take longer to recognize the stepdown
+
+ jsTest.log("Stepping back up...");
+
+ stepDown(secondary, 10000);
+
+ jsTest.log("Waiting for mongos to acknowledge step up...");
+
+ ReplSetTest.awaitRSClientHosts(mongos, primary, {ismaster: true}, st.rs0, 2 * 60 * 1000);
+
+ jsTest.log("Waiting for socket timeout time...");
+
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
+
+ jsTest.log("Run queries using new connections.");
+
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ printjson(newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
+ }
+ }
+
+ assert.eq(0, numErrors);
-} // End Win32 check
+ } // End Win32 check
-jsTest.log("DONE!");
+ jsTest.log("DONE!");
-st.stop();
+ st.stop();
}()); \ No newline at end of file
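
replSetStepDown drops the connection it was issued on, so the shell idiom above wraps it in try/catch and treats the exception as success. A standalone sketch, assuming `node` is a direct connection to the current primary:

    var stepDown = function(node, timeSecs) {
        var result = null;
        try {
            result = node.getDB('admin').runCommand({replSetStepDown: timeSecs, force: true});
        } catch (e) {
            // Expected: the primary closes its connections while stepping down.
            print('stepdown closed the connection: ' + e);
        }
        // If the command returned a document, the stepdown did not actually happen.
        assert.eq(null, result, tojson(result));
    };
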
diff --git a/jstests/sharding/secondary_query_routing.js b/jstests/sharding/secondary_query_routing.js
index 8b9649a23ad..ff0dfcb22d9 100644
--- a/jstests/sharding/secondary_query_routing.js
+++ b/jstests/sharding/secondary_query_routing.js
@@ -4,35 +4,35 @@
*/
(function() {
-var rsOpts = { nodes: 2 };
-var st = new ShardingTest({ mongos: 2, shards: { rs0: rsOpts, rs1: rsOpts }});
+ var rsOpts = {
+ nodes: 2
+ };
+ var st = new ShardingTest({mongos: 2, shards: {rs0: rsOpts, rs1: rsOpts}});
-st.s0.adminCommand({ enableSharding: 'test' });
+ st.s0.adminCommand({enableSharding: 'test'});
-st.ensurePrimaryShard('test', 'test-rs0');
-st.s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
-st.s0.adminCommand({ split: 'test.user', middle: { x: 0 }});
+ st.ensurePrimaryShard('test', 'test-rs0');
+ st.s0.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ st.s0.adminCommand({split: 'test.user', middle: {x: 0}});
-st.s1.setReadPref('secondary');
-var testDB = st.s1.getDB('test');
-// This establishes the shard version Mongos #1's view.
-testDB.user.insert({ x: 1 });
+ st.s1.setReadPref('secondary');
+ var testDB = st.s1.getDB('test');
+ // This establishes the shard version in Mongos #1's view.
+ testDB.user.insert({x: 1});
-// Mongos #0 bumps up the version without Mongos #1 knowledge.
-// Note: moveChunk has implicit { w: 2 } write concern.
-st.s0.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'test-rs1',
- _waitForDelete: true });
+ // Mongos #0 bumps up the version without Mongos #1 knowledge.
+ // Note: moveChunk has implicit { w: 2 } write concern.
+ st.s0.adminCommand(
+ {moveChunk: 'test.user', find: {x: 0}, to: 'test-rs1', _waitForDelete: true});
-// Clear all the connections to make sure that Mongos #1 will attempt to establish
-// the shard version.
-assert.commandWorked(testDB.adminCommand({ connPoolSync: 1 }));
+ // Clear all the connections to make sure that Mongos #1 will attempt to establish
+ // the shard version.
+ assert.commandWorked(testDB.adminCommand({connPoolSync: 1}));
-// Mongos #1 performs a query to the secondary.
-var res = testDB.runReadCommand({ count: 'user', query: { x: 1 }});
-assert(res.ok);
-assert.eq(1, res.n, tojson(res));
+ // Mongos #1 performs a query to the secondary.
+ var res = testDB.runReadCommand({count: 'user', query: {x: 1}});
+ assert(res.ok);
+ assert.eq(1, res.n, tojson(res));
-st.stop();
+ st.stop();
})();
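
The routing under test hinges on two shell calls: setReadPref on the connection and runReadCommand so the count may target a secondary. A short sketch, assuming `conn` is a connection to a mongos in front of replica-set shards:

    conn.setReadPref('secondary');
    var res = conn.getDB('test').runReadCommand({count: 'user', query: {x: 1}});
    assert.commandWorked(res);
    print('count answered by a secondary: ' + res.n);
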
diff --git a/jstests/sharding/server_status.js b/jstests/sharding/server_status.js
index 094cb3ca433..b8e59b22275 100644
--- a/jstests/sharding/server_status.js
+++ b/jstests/sharding/server_status.js
@@ -4,48 +4,47 @@
*/
(function() {
-"use strict";
-
-var st = new ShardingTest({ shards: 1 });
-
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { _id: 1 }});
-
-// Initialize shard metadata in shards
-testDB.user.insert({ x: 1 });
-
-var checkShardingServerStatus = function(doc, isCSRS) {
- var shardingSection = doc.sharding;
- assert.neq(shardingSection, null);
-
- var configConnStr = shardingSection.configsvrConnectionString;
- var configConn = new Mongo(configConnStr);
- var configIsMaster = configConn.getDB('admin').runCommand({ isMaster: 1 });
-
- var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
-
- if (isCSRS) {
- assert.gt(configConnStr.indexOf('/'), 0);
- assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
- assert.neq(null, configOpTimeObj);
- assert.neq(null, configOpTimeObj.ts);
- assert.neq(null, configOpTimeObj.t);
- }
- else {
- assert.eq(-1, configConnStr.indexOf('/'));
- assert.gt(configConnStr.indexOf(','), 0);
- assert.eq(0, configIsMaster.configsvr);
- assert.eq(null, configOpTimeObj);
- }
-};
-
-var mongosServerStatus = testDB.adminCommand({ serverStatus: 1 });
-var isCSRS = st.configRS != null;
-checkShardingServerStatus(mongosServerStatus, isCSRS);
-
-var mongodServerStatus = st.d0.getDB('admin').runCommand({ serverStatus: 1 });
-checkShardingServerStatus(mongodServerStatus, isCSRS);
-
-st.stop();
+ "use strict";
+
+ var st = new ShardingTest({shards: 1});
+
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {_id: 1}});
+
+ // Initialize shard metadata in shards
+ testDB.user.insert({x: 1});
+
+ var checkShardingServerStatus = function(doc, isCSRS) {
+ var shardingSection = doc.sharding;
+ assert.neq(shardingSection, null);
+
+ var configConnStr = shardingSection.configsvrConnectionString;
+ var configConn = new Mongo(configConnStr);
+ var configIsMaster = configConn.getDB('admin').runCommand({isMaster: 1});
+
+ var configOpTimeObj = shardingSection.lastSeenConfigServerOpTime;
+
+ if (isCSRS) {
+ assert.gt(configConnStr.indexOf('/'), 0);
+ assert.eq(1, configIsMaster.configsvr); // If it's a shard, this field won't exist.
+ assert.neq(null, configOpTimeObj);
+ assert.neq(null, configOpTimeObj.ts);
+ assert.neq(null, configOpTimeObj.t);
+ } else {
+ assert.eq(-1, configConnStr.indexOf('/'));
+ assert.gt(configConnStr.indexOf(','), 0);
+ assert.eq(0, configIsMaster.configsvr);
+ assert.eq(null, configOpTimeObj);
+ }
+ };
+
+ var mongosServerStatus = testDB.adminCommand({serverStatus: 1});
+ var isCSRS = st.configRS != null;
+ checkShardingServerStatus(mongosServerStatus, isCSRS);
+
+ var mongodServerStatus = st.d0.getDB('admin').runCommand({serverStatus: 1});
+ checkShardingServerStatus(mongodServerStatus, isCSRS);
+
+ st.stop();
})();
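
A minimal probe of the serverStatus section inspected above, assuming `conn` is any connection to a mongos or shard in the cluster:

    var status = conn.getDB('admin').runCommand({serverStatus: 1});
    assert.commandWorked(status);
    // configsvrConnectionString names the config servers this node talks to;
    // lastSeenConfigServerOpTime is only populated when they form a CSRS.
    printjson(status.sharding);
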
diff --git a/jstests/sharding/shard1.js b/jstests/sharding/shard1.js
index faf852c6044..3b97bbc0306 100644
--- a/jstests/sharding/shard1.js
+++ b/jstests/sharding/shard1.js
@@ -4,44 +4,51 @@
s = new ShardingTest({name: "shard1", shards: 2});
-db = s.getDB( "test" );
-db.foo.insert( { num : 1 , name : "eliot" } );
-db.foo.insert( { num : 2 , name : "sara" } );
-db.foo.insert( { num : -1 , name : "joe" } );
-db.foo.ensureIndex( { num : 1 } );
-assert.eq( 3 , db.foo.find().length() , "A" );
-
-shardCommand = { shardcollection : "test.foo" , key : { num : 1 } };
-
-assert.throws( function(){ s.adminCommand( shardCommand ); } );
-
-s.adminCommand( { enablesharding : "test" } );
+db = s.getDB("test");
+db.foo.insert({num: 1, name: "eliot"});
+db.foo.insert({num: 2, name: "sara"});
+db.foo.insert({num: -1, name: "joe"});
+db.foo.ensureIndex({num: 1});
+assert.eq(3, db.foo.find().length(), "A");
+
+shardCommand = {
+ shardcollection: "test.foo",
+ key: {num: 1}
+};
+
+assert.throws(function() {
+ s.adminCommand(shardCommand);
+});
+
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-assert.eq( 3 , db.foo.find().length() , "after partitioning count failed" );
+assert.eq(3, db.foo.find().length(), "after partitioning count failed");
-s.adminCommand( shardCommand );
+s.adminCommand(shardCommand);
-assert.throws( function(){ s.adminCommand({ shardCollection: 'test', key: { x: 1 }}); });
-assert.throws( function(){ s.adminCommand({ shardCollection: '.foo', key: { x: 1 }}); });
+assert.throws(function() {
+ s.adminCommand({shardCollection: 'test', key: {x: 1}});
+});
+assert.throws(function() {
+ s.adminCommand({shardCollection: '.foo', key: {x: 1}});
+});
-var cconfig = s.config.collections.findOne( { _id : "test.foo" } );
-assert( cconfig , "why no collection entry for test.foo" );
+var cconfig = s.config.collections.findOne({_id: "test.foo"});
+assert(cconfig, "why no collection entry for test.foo");
delete cconfig.lastmod;
delete cconfig.dropped;
delete cconfig.lastmodEpoch;
-assert.eq(cconfig,
- { _id : "test.foo" , key : { num : 1 } , unique : false },
- "Sharded content mismatch");
+assert.eq(cconfig, {_id: "test.foo", key: {num: 1}, unique: false}, "Sharded content mismatch");
-s.config.collections.find().forEach( printjson );
+s.config.collections.find().forEach(printjson);
-assert.eq( 1 , s.config.chunks.count() , "num chunks A");
+assert.eq(1, s.config.chunks.count(), "num chunks A");
si = s.config.chunks.findOne();
-assert( si );
-assert.eq( si.ns , "test.foo" );
+assert(si);
+assert.eq(si.ns, "test.foo");
-assert.eq( 3 , db.foo.find().length() , "after sharding, no split count failed" );
+assert.eq(3, db.foo.find().length(), "after sharding, no split count failed");
s.stop();
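
shard1.js ends up validating the metadata that shardCollection writes into the config database. The same inspection in isolation, assuming a ShardingTest `s` with test.foo already sharded on {num: 1}:

    var collEntry = s.config.collections.findOne({_id: 'test.foo'});
    assert(collEntry, 'no config.collections entry for test.foo');
    assert.eq({num: 1}, collEntry.key);                      // the declared shard key
    assert.eq(1, s.config.chunks.count({ns: 'test.foo'}));   // a new collection starts as one chunk
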
diff --git a/jstests/sharding/shard2.js b/jstests/sharding/shard2.js
index f4946e13573..abe91508650 100644
--- a/jstests/sharding/shard2.js
+++ b/jstests/sharding/shard2.js
@@ -4,18 +4,18 @@
* test basic sharding
*/
-placeCheck = function( num ){
- print("shard2 step: " + num );
+placeCheck = function(num) {
+ print("shard2 step: " + num);
};
-printAll = function(){
- print( "****************" );
- db.foo.find().forEach( printjsononeline );
- print( "++++++++++++++++++" );
- primary.foo.find().forEach( printjsononeline );
- print( "++++++++++++++++++" );
- secondary.foo.find().forEach( printjsononeline );
- print( "---------------------" );
+printAll = function() {
+ print("****************");
+ db.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ primary.foo.find().forEach(printjsononeline);
+ print("++++++++++++++++++");
+ secondary.foo.find().forEach(printjsononeline);
+ print("---------------------");
};
s = new ShardingTest({name: "shard2", shards: 2});
@@ -24,205 +24,221 @@ s = new ShardingTest({name: "shard2", shards: 2});
// it moves small #s of chunks too
s.stopBalancer();
-db = s.getDB( "test" );
+db = s.getDB("test");
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-assert.eq( 1 , s.config.chunks.count() , "sanity check 1" );
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+assert.eq(1, s.config.chunks.count(), "sanity check 1");
-s.adminCommand( { split : "test.foo" , middle : { num : 0 } } );
-assert.eq( 2 , s.config.chunks.count() , "should be 2 shards" );
+s.adminCommand({split: "test.foo", middle: {num: 0}});
+assert.eq(2, s.config.chunks.count(), "should be 2 shards");
chunks = s.config.chunks.find().toArray();
-assert.eq( chunks[0].shard , chunks[1].shard , "server should be the same after a split" );
+assert.eq(chunks[0].shard, chunks[1].shard, "server should be the same after a split");
+db.foo.save({num: 1, name: "eliot"});
+db.foo.save({num: 2, name: "sara"});
+db.foo.save({num: -1, name: "joe"});
-db.foo.save( { num : 1 , name : "eliot" } );
-db.foo.save( { num : 2 , name : "sara" } );
-db.foo.save( { num : -1 , name : "joe" } );
+assert.eq(3,
+ s.getPrimaryShard("test").getDB("test").foo.find().length(),
+ "not right directly to db A");
+assert.eq(3, db.foo.find().length(), "not right on shard");
-assert.eq( 3 , s.getPrimaryShard( "test" ).getDB( "test" ).foo.find().length(),
- "not right directly to db A" );
-assert.eq( 3 , db.foo.find().length() , "not right on shard" );
+primary = s.getPrimaryShard("test").getDB("test");
+secondary = s.getOther(primary).getDB("test");
-primary = s.getPrimaryShard( "test" ).getDB( "test" );
-secondary = s.getOther( primary ).getDB( "test" );
+assert.eq(3, primary.foo.find().length(), "primary wrong B");
+assert.eq(0, secondary.foo.find().length(), "secondary wrong C");
+assert.eq(3, db.foo.find().sort({num: 1}).length());
-assert.eq( 3 , primary.foo.find().length() , "primary wrong B" );
-assert.eq( 0 , secondary.foo.find().length() , "secondary wrong C" );
-assert.eq( 3 , db.foo.find().sort( { num : 1 } ).length() );
-
-placeCheck( 2 );
+placeCheck(2);
// NOTE: at this point we have 2 shards on 1 server
// test move shard
-assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : primary.getMongo().name, _waitForDelete : true } ); } );
-assert.throws( function(){ s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : "adasd", _waitForDelete : true } ); } );
-
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : secondary.getMongo().name, _waitForDelete : true } );
-assert.eq( 2 , secondary.foo.find().length() , "secondary should have 2 after move shard" );
-assert.eq( 1 , primary.foo.find().length() , "primary should only have 1 after move shard" );
-
-assert.eq( 2 , s.config.chunks.count() , "still should have 2 shards after move not:" + s.getChunksString() );
+assert.throws(function() {
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 1},
+ to: primary.getMongo().name,
+ _waitForDelete: true
+ });
+});
+assert.throws(function() {
+ s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: "adasd", _waitForDelete: true});
+});
+
+s.adminCommand(
+ {movechunk: "test.foo", find: {num: 1}, to: secondary.getMongo().name, _waitForDelete: true});
+assert.eq(2, secondary.foo.find().length(), "secondary should have 2 after move shard");
+assert.eq(1, primary.foo.find().length(), "primary should only have 1 after move shard");
+
+assert.eq(2,
+ s.config.chunks.count(),
+ "still should have 2 shards after move not:" + s.getChunksString());
chunks = s.config.chunks.find().toArray();
-assert.neq( chunks[0].shard , chunks[1].shard , "servers should NOT be the same after the move" );
+assert.neq(chunks[0].shard, chunks[1].shard, "servers should NOT be the same after the move");
-placeCheck( 3 );
+placeCheck(3);
// test inserts go to right server/shard
-assert.writeOK(db.foo.save( { num : 3 , name : "bob" } ));
-assert.eq( 1 , primary.foo.find().length() , "after move insert go wrong place?" );
-assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
+assert.writeOK(db.foo.save({num: 3, name: "bob"}));
+assert.eq(1, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
-assert.writeOK(db.foo.save( { num : -2 , name : "funny man" } ));
-assert.eq( 2 , primary.foo.find().length() , "after move insert go wrong place?" );
-assert.eq( 3 , secondary.foo.find().length() , "after move insert go wrong place?" );
+assert.writeOK(db.foo.save({num: -2, name: "funny man"}));
+assert.eq(2, primary.foo.find().length(), "after move insert go wrong place?");
+assert.eq(3, secondary.foo.find().length(), "after move insert go wrong place?");
+assert.writeOK(db.foo.save({num: 0, name: "funny guy"}));
+assert.eq(2, primary.foo.find().length(), "boundary A");
+assert.eq(4, secondary.foo.find().length(), "boundary B");
-assert.writeOK(db.foo.save( { num : 0 , name : "funny guy" } ));
-assert.eq( 2 , primary.foo.find().length() , "boundary A" );
-assert.eq( 4 , secondary.foo.find().length() , "boundary B" );
-
-placeCheck( 4 );
+placeCheck(4);
// findOne
-assert.eq( "eliot" , db.foo.findOne( { num : 1 } ).name );
-assert.eq( "funny man" , db.foo.findOne( { num : -2 } ).name );
+assert.eq("eliot", db.foo.findOne({num: 1}).name);
+assert.eq("funny man", db.foo.findOne({num: -2}).name);
// getAll
-function sumQuery( c ){
+function sumQuery(c) {
var sum = 0;
- c.toArray().forEach(
- function(z){
- sum += z.num;
- }
- );
+ c.toArray().forEach(function(z) {
+ sum += z.num;
+ });
return sum;
}
-assert.eq( 6 , db.foo.find().length() , "sharded query 1" );
-assert.eq( 3 , sumQuery( db.foo.find() ) , "sharded query 2" );
+assert.eq(6, db.foo.find().length(), "sharded query 1");
+assert.eq(3, sumQuery(db.foo.find()), "sharded query 2");
-placeCheck( 5 );
+placeCheck(5);
// sort by num
-assert.eq( 3 , sumQuery( db.foo.find().sort( { num : 1 } ) ) , "sharding query w/sort 1" );
-assert.eq( 3 , sumQuery( db.foo.find().sort( { num : -1 } ) ) , "sharding query w/sort 2" );
+assert.eq(3, sumQuery(db.foo.find().sort({num: 1})), "sharding query w/sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({num: -1})), "sharding query w/sort 2");
-assert.eq( "funny man" , db.foo.find().sort( { num : 1 } )[0].name , "sharding query w/sort 3 order wrong" );
-assert.eq( -2 , db.foo.find().sort( { num : 1 } )[0].num , "sharding query w/sort 4 order wrong" );
+assert.eq("funny man", db.foo.find().sort({num: 1})[0].name, "sharding query w/sort 3 order wrong");
+assert.eq(-2, db.foo.find().sort({num: 1})[0].num, "sharding query w/sort 4 order wrong");
-assert.eq( "bob" , db.foo.find().sort( { num : -1 } )[0].name , "sharding query w/sort 5 order wrong" );
-assert.eq( 3 , db.foo.find().sort( { num : -1 } )[0].num , "sharding query w/sort 6 order wrong" );
+assert.eq("bob", db.foo.find().sort({num: -1})[0].name, "sharding query w/sort 5 order wrong");
+assert.eq(3, db.foo.find().sort({num: -1})[0].num, "sharding query w/sort 6 order wrong");
-placeCheck( 6 );
+placeCheck(6);
// sort by name
-function getNames( c ){
- return c.toArray().map( function(z){ return z.name; } );
+function getNames(c) {
+ return c.toArray().map(function(z) {
+ return z.name;
+ });
}
-correct = getNames( db.foo.find() ).sort();
-assert.eq( correct , getNames( db.foo.find().sort( { name : 1 } ) ) );
+correct = getNames(db.foo.find()).sort();
+assert.eq(correct, getNames(db.foo.find().sort({name: 1})));
correct = correct.reverse();
-assert.eq( correct , getNames( db.foo.find().sort( { name : -1 } ) ) );
-
-assert.eq( 3 , sumQuery( db.foo.find().sort( { name : 1 } ) ) , "sharding query w/non-shard sort 1" );
-assert.eq( 3 , sumQuery( db.foo.find().sort( { name : -1 } ) ) , "sharding query w/non-shard sort 2" );
+assert.eq(correct, getNames(db.foo.find().sort({name: -1})));
+assert.eq(3, sumQuery(db.foo.find().sort({name: 1})), "sharding query w/non-shard sort 1");
+assert.eq(3, sumQuery(db.foo.find().sort({name: -1})), "sharding query w/non-shard sort 2");
// sort by num multiple shards per server
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-assert.eq( "funny man" , db.foo.find().sort( { num : 1 } )[0].name , "sharding query w/sort and another split 1 order wrong" );
-assert.eq( "bob" , db.foo.find().sort( { num : -1 } )[0].name , "sharding query w/sort and another split 2 order wrong" );
-assert.eq( "funny man" , db.foo.find( { num : { $lt : 100 } } ).sort( { num : 1 } ).arrayAccess(0).name , "sharding query w/sort and another split 3 order wrong" );
-
-placeCheck( 7 );
-
-db.foo.find().sort( { _id : 1 } ).forEach( function(z){ print( z._id ); } );
+s.adminCommand({split: "test.foo", middle: {num: 2}});
+assert.eq("funny man",
+ db.foo.find().sort({num: 1})[0].name,
+ "sharding query w/sort and another split 1 order wrong");
+assert.eq("bob",
+ db.foo.find().sort({num: -1})[0].name,
+ "sharding query w/sort and another split 2 order wrong");
+assert.eq("funny man",
+ db.foo.find({num: {$lt: 100}}).sort({num: 1}).arrayAccess(0).name,
+ "sharding query w/sort and another split 3 order wrong");
+
+placeCheck(7);
+
+db.foo.find().sort({_id: 1}).forEach(function(z) {
+ print(z._id);
+});
zzz = db.foo.find().explain("executionStats").executionStats;
-assert.eq( 0 , zzz.totalKeysExamined , "EX1a" );
-assert.eq( 6 , zzz.nReturned , "EX1b" );
-assert.eq( 6 , zzz.totalDocsExamined , "EX1c" );
+assert.eq(0, zzz.totalKeysExamined, "EX1a");
+assert.eq(6, zzz.nReturned, "EX1b");
+assert.eq(6, zzz.totalDocsExamined, "EX1c");
-zzz = db.foo.find().hint( { _id : 1 } ).sort( { _id : 1 } )
- .explain("executionStats").executionStats;
-assert.eq( 6 , zzz.totalKeysExamined , "EX2a" );
-assert.eq( 6 , zzz.nReturned , "EX2b" );
-assert.eq( 6 , zzz.totalDocsExamined , "EX2c" );
+zzz = db.foo.find().hint({_id: 1}).sort({_id: 1}).explain("executionStats").executionStats;
+assert.eq(6, zzz.totalKeysExamined, "EX2a");
+assert.eq(6, zzz.nReturned, "EX2b");
+assert.eq(6, zzz.totalDocsExamined, "EX2c");
// getMore
-assert.eq( 4 , db.foo.find().limit(-4).toArray().length , "getMore 1" );
-function countCursor( c ){
+assert.eq(4, db.foo.find().limit(-4).toArray().length, "getMore 1");
+function countCursor(c) {
var num = 0;
- while ( c.hasNext() ){
+ while (c.hasNext()) {
c.next();
num++;
}
return num;
}
-assert.eq( 6 , countCursor( db.foo.find()._exec() ) , "getMore 2" );
-assert.eq( 6 , countCursor( db.foo.find().batchSize(1)._exec() ) , "getMore 3" );
+assert.eq(6, countCursor(db.foo.find()._exec()), "getMore 2");
+assert.eq(6, countCursor(db.foo.find().batchSize(1)._exec()), "getMore 3");
// find by non-shard-key
-db.foo.find().forEach(
- function(z){
- var y = db.foo.findOne( { _id : z._id } );
- assert( y , "_id check 1 : " + tojson( z ) );
- assert.eq( z.num , y.num , "_id check 2 : " + tojson( z ) );
- }
-);
+db.foo.find().forEach(function(z) {
+ var y = db.foo.findOne({_id: z._id});
+ assert(y, "_id check 1 : " + tojson(z));
+ assert.eq(z.num, y.num, "_id check 2 : " + tojson(z));
+});
// update
-person = db.foo.findOne( { num : 3 } );
-assert.eq( "bob" , person.name , "update setup 1" );
+person = db.foo.findOne({num: 3});
+assert.eq("bob", person.name, "update setup 1");
person.name = "bob is gone";
-db.foo.update( { num : 3 } , person );
-person = db.foo.findOne( { num : 3 } );
-assert.eq( "bob is gone" , person.name , "update test B" );
+db.foo.update({num: 3}, person);
+person = db.foo.findOne({num: 3});
+assert.eq("bob is gone", person.name, "update test B");
// remove
-assert( db.foo.findOne( { num : 3 } ) != null , "remove test A" );
-db.foo.remove( { num : 3 } );
-assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test B" );
+assert(db.foo.findOne({num: 3}) != null, "remove test A");
+db.foo.remove({num: 3});
+assert.isnull(db.foo.findOne({num: 3}), "remove test B");
-db.foo.save( { num : 3 , name : "eliot2" } );
-person = db.foo.findOne( { num : 3 } );
-assert( person , "remove test C" );
-assert.eq( person.name , "eliot2" );
+db.foo.save({num: 3, name: "eliot2"});
+person = db.foo.findOne({num: 3});
+assert(person, "remove test C");
+assert.eq(person.name, "eliot2");
-db.foo.remove( { _id : person._id } );
-assert.isnull( db.foo.findOne( { num : 3 } ) , "remove test E" );
+db.foo.remove({_id: person._id});
+assert.isnull(db.foo.findOne({num: 3}), "remove test E");
-placeCheck( 8 );
+placeCheck(8);
// more update stuff
printAll();
total = db.foo.find().count();
-var res = assert.writeOK(db.foo.update( {}, { $inc: { x: 1 } }, false, true ));
+var res = assert.writeOK(db.foo.update({}, {$inc: {x: 1}}, false, true));
printAll();
-assert.eq( total , res.nModified, res.toString() );
-
+assert.eq(total, res.nModified, res.toString());
-res = db.foo.update( { num : -1 } , { $inc : { x : 1 } } , false , true );
-assert.eq( 1, res.nModified, res.toString() );
+res = db.foo.update({num: -1}, {$inc: {x: 1}}, false, true);
+assert.eq(1, res.nModified, res.toString());
// ---- move all to the secondary
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
-secondary.foo.insert( { num : -3 } );
+secondary.foo.insert({num: -3});
-s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : secondary.getMongo().name, _waitForDelete : true } );
-assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
+s.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: secondary.getMongo().name, _waitForDelete: true});
+assert.eq(1, s.onNumShards("foo"), "on 1 shards");
-s.adminCommand( { movechunk : "test.foo" , find : { num : -2 } , to : primary.getMongo().name, _waitForDelete : true } );
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards again" );
-assert.eq( 3 , s.config.chunks.count() , "only 3 chunks" );
+s.adminCommand(
+ {movechunk: "test.foo", find: {num: -2}, to: primary.getMongo().name, _waitForDelete: true});
+assert.eq(2, s.onNumShards("foo"), "on 2 shards again");
+assert.eq(3, s.config.chunks.count(), "only 3 chunks");
-print( "YO : " + tojson( db.runCommand( "serverStatus" ) ) );
+print("YO : " + tojson(db.runCommand("serverStatus")));
s.stop();
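
The explain assertions in the hunk above check that an unindexed scan of the sharded collection examines documents rather than index keys. A reduced sketch against the same `db.foo`:

    var stats = db.foo.find().explain('executionStats').executionStats;
    assert.eq(0, stats.totalKeysExamined);               // collection scan: no index keys touched
    // With no orphaned documents, every examined document is returned.
    assert.eq(stats.nReturned, stats.totalDocsExamined);
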
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js
index 3b68d330eca..926b350c7e9 100644
--- a/jstests/sharding/shard3.js
+++ b/jstests/sharding/shard3.js
@@ -1,186 +1,194 @@
(function() {
-// Include helpers for analyzing explain output.
-load("jstests/libs/analyze_plan.js");
-
-var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: { enableBalancer: true }});
-
-s2 = s._mongos[1];
-
-db = s.getDB( "test" );
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-if (s.configRS) {
- // Ensure that the second mongos will see the movePrimary
- s.configRS.awaitLastOpCommitted();
-}
-
-assert( sh.getBalancerState() , "A1" );
-sh.setBalancerState(false);
-assert( ! sh.getBalancerState() , "A2" );
-sh.setBalancerState(true);
-assert( sh.getBalancerState() , "A3" );
-sh.setBalancerState(false);
-assert( ! sh.getBalancerState() , "A4" );
-
-s.config.databases.find().forEach( printjson );
-
-a = s.getDB( "test" ).foo;
-b = s2.getDB( "test" ).foo;
-
-primary = s.getPrimaryShard( "test" ).getDB( "test" ).foo;
-secondary = s.getOther( primary.name ).getDB( "test" ).foo;
-
-a.save( { num : 1 } );
-a.save( { num : 2 } );
-a.save( { num : 3 } );
-
-assert.eq( 3 , a.find().toArray().length , "normal A" );
-assert.eq( 3 , b.find().toArray().length , "other A" );
-
-assert.eq( 3 , primary.count() , "p1" );
-assert.eq( 0 , secondary.count() , "s1" );
-
-assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shards" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-
-assert( primary.find().toArray().length > 0 , "blah 1" );
-assert( secondary.find().toArray().length > 0 , "blah 2" );
-assert.eq( 3 , primary.find().itcount() + secondary.find().itcount() , "blah 3" );
-
-assert.eq( 3 , a.find().toArray().length , "normal B" );
-assert.eq( 3 , b.find().toArray().length , "other B" );
-
-printjson( primary._db._adminCommand( "shardingState" ) );
-
-// --- filtering ---
-
-function doCounts( name , total , onlyItCounts ){
- total = total || ( primary.count() + secondary.count() );
- if ( ! onlyItCounts )
- assert.eq( total , a.count() , name + " count" );
- assert.eq( total , a.find().sort( { n : 1 } ).itcount() , name + " itcount - sort n" );
- assert.eq( total , a.find().itcount() , name + " itcount" );
- assert.eq( total , a.find().sort( { _id : 1 } ).itcount() , name + " itcount - sort _id" );
- return total;
-}
-
-var total = doCounts( "before wrong save" );
-assert.writeOK(secondary.insert( { _id : 111 , num : -3 } ));
-doCounts( "after wrong save" , total , true );
-e = a.find().explain("executionStats").executionStats;
-assert.eq( 3 , e.nReturned , "ex1" );
-assert.eq( 0 , e.totalKeysExamined , "ex2" );
-assert.eq( 4 , e.totalDocsExamined , "ex3" );
-
-var chunkSkips = 0;
-for (var shard in e.executionStages.shards) {
- var theShard = e.executionStages.shards[shard];
- chunkSkips += getChunkSkips(theShard.executionStages);
-}
-assert.eq( 1 , chunkSkips , "ex4" );
-
-// SERVER-4612
-// make sure idhack obeys chunks
-x = a.findOne( { _id : 111 } );
-assert( ! x , "idhack didn't obey chunk boundaries " + tojson(x) );
-
-// --- move all to 1 ---
-print( "MOVE ALL TO 1" );
-
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
-s.printCollectionInfo( "test.foo" );
-
-assert( a.findOne( { num : 1 } ) );
-assert( b.findOne( { num : 1 } ) );
-
-print( "GOING TO MOVE" );
-assert( a.findOne( { num : 1 } ) , "pre move 1" );
-s.printCollectionInfo( "test.foo" );
-myto = s.getOther( s.getPrimaryShard( "test" ) ).name;
-print( "counts before move: " + tojson( s.shardCounts( "foo" ) ) );
-s.adminCommand( { movechunk : "test.foo" , find : { num : 1 } , to : myto, _waitForDelete : true } );
-print( "counts after move: " + tojson( s.shardCounts( "foo" ) ) );
-s.printCollectionInfo( "test.foo" );
-assert.eq( 1 , s.onNumShards( "foo" ) , "on 1 shard again" );
-assert( a.findOne( { num : 1 } ) , "post move 1" );
-assert( b.findOne( { num : 1 } ) , "post move 2" );
-
-print( "*** drop" );
-
-s.printCollectionInfo( "test.foo" , "before drop" );
-a.drop();
-s.printCollectionInfo( "test.foo" , "after drop" );
-
-assert.eq( 0 , a.count() , "a count after drop" );
-assert.eq( 0 , b.count() , "b count after drop" );
-
-s.printCollectionInfo( "test.foo" , "after counts" );
-
-assert.eq( 0 , primary.count() , "p count after drop" );
-assert.eq( 0 , secondary.count() , "s count after drop" );
-
-print( "*** dropDatabase setup" );
-
-s.printShardingStatus();
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
-a.save( { num : 2 } );
-a.save( { num : 3 } );
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
-s.adminCommand( { movechunk : "test.foo" ,
- find : { num : 3 } ,
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-s.printShardingStatus();
-
-s.printCollectionInfo( "test.foo" , "after dropDatabase setup" );
-doCounts( "after dropDatabase setup2" );
-s.printCollectionInfo( "test.foo" , "after dropDatabase setup3" );
-
-print( "*** ready to call dropDatabase" );
-res = s.getDB( "test" ).dropDatabase();
-assert.eq( 1 , res.ok , "dropDatabase failed : " + tojson( res ) );
-// Waiting for SERVER-2253
-assert.eq( 0 , s.config.databases.count( { _id: "test" } ) , "database 'test' was dropped but still appears in configDB" );
-
-s.printShardingStatus();
-s.printCollectionInfo( "test.foo" , "after dropDatabase call 1" );
-assert.eq( 0 , doCounts( "after dropDatabase called" ) );
-
-// ---- retry commands SERVER-1471 ----
-
-s.adminCommand( { enablesharding : "test2" } );
-s.ensurePrimaryShard('test2', 'shard0000');
-s.adminCommand( { shardcollection : "test2.foo" , key : { num : 1 } } );
-dba = s.getDB( "test2" );
-dbb = s2.getDB( "test2" );
-dba.foo.save( { num : 1 } );
-dba.foo.save( { num : 2 } );
-dba.foo.save( { num : 3 } );
-
-assert.eq( 1 , s.onNumShards( "foo" , "test2" ) , "B on 1 shards" );
-assert.eq( 3 , dba.foo.count() , "Ba" );
-assert.eq( 3 , dbb.foo.count() , "Bb" );
-
-s.adminCommand( { split : "test2.foo" , middle : { num : 2 } } );
-s.adminCommand( { movechunk : "test2.foo",
- find : { num : 3 } ,
- to : s.getOther( s.getPrimaryShard( "test2" ) ).name,
- _waitForDelete : true } );
-
-assert.eq( 2 , s.onNumShards( "foo" , "test2" ) , "B on 2 shards" );
-
-x = dba.foo.stats();
-printjson( x );
-y = dbb.foo.stats();
-printjson( y );
-
-s.stop();
+ // Include helpers for analyzing explain output.
+ load("jstests/libs/analyze_plan.js");
+
+ var s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}});
+
+ s2 = s._mongos[1];
+
+ db = s.getDB("test");
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+ if (s.configRS) {
+ // Ensure that the second mongos will see the movePrimary
+ s.configRS.awaitLastOpCommitted();
+ }
+
+ assert(sh.getBalancerState(), "A1");
+ sh.setBalancerState(false);
+ assert(!sh.getBalancerState(), "A2");
+ sh.setBalancerState(true);
+ assert(sh.getBalancerState(), "A3");
+ sh.setBalancerState(false);
+ assert(!sh.getBalancerState(), "A4");
+
+ s.config.databases.find().forEach(printjson);
+
+ a = s.getDB("test").foo;
+ b = s2.getDB("test").foo;
+
+ primary = s.getPrimaryShard("test").getDB("test").foo;
+ secondary = s.getOther(primary.name).getDB("test").foo;
+
+ a.save({num: 1});
+ a.save({num: 2});
+ a.save({num: 3});
+
+ assert.eq(3, a.find().toArray().length, "normal A");
+ assert.eq(3, b.find().toArray().length, "other A");
+
+ assert.eq(3, primary.count(), "p1");
+ assert.eq(0, secondary.count(), "s1");
+
+ assert.eq(1, s.onNumShards("foo"), "on 1 shards");
+
+ s.adminCommand({split: "test.foo", middle: {num: 2}});
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
+
+ assert(primary.find().toArray().length > 0, "blah 1");
+ assert(secondary.find().toArray().length > 0, "blah 2");
+ assert.eq(3, primary.find().itcount() + secondary.find().itcount(), "blah 3");
+
+ assert.eq(3, a.find().toArray().length, "normal B");
+ assert.eq(3, b.find().toArray().length, "other B");
+
+ printjson(primary._db._adminCommand("shardingState"));
+
+ // --- filtering ---
+
+ function doCounts(name, total, onlyItCounts) {
+ total = total || (primary.count() + secondary.count());
+ if (!onlyItCounts)
+ assert.eq(total, a.count(), name + " count");
+ assert.eq(total, a.find().sort({n: 1}).itcount(), name + " itcount - sort n");
+ assert.eq(total, a.find().itcount(), name + " itcount");
+ assert.eq(total, a.find().sort({_id: 1}).itcount(), name + " itcount - sort _id");
+ return total;
+ }
+
+ var total = doCounts("before wrong save");
+ assert.writeOK(secondary.insert({_id: 111, num: -3}));
+ doCounts("after wrong save", total, true);
+ e = a.find().explain("executionStats").executionStats;
+ assert.eq(3, e.nReturned, "ex1");
+ assert.eq(0, e.totalKeysExamined, "ex2");
+ assert.eq(4, e.totalDocsExamined, "ex3");
+
+ var chunkSkips = 0;
+ for (var shard in e.executionStages.shards) {
+ var theShard = e.executionStages.shards[shard];
+ chunkSkips += getChunkSkips(theShard.executionStages);
+ }
+ assert.eq(1, chunkSkips, "ex4");
+
+ // SERVER-4612
+ // make sure idhack obeys chunks
+ x = a.findOne({_id: 111});
+ assert(!x, "idhack didn't obey chunk boundaries " + tojson(x));
+
+ // --- move all to 1 ---
+ print("MOVE ALL TO 1");
+
+ assert.eq(2, s.onNumShards("foo"), "on 2 shards");
+ s.printCollectionInfo("test.foo");
+
+ assert(a.findOne({num: 1}));
+ assert(b.findOne({num: 1}));
+
+ print("GOING TO MOVE");
+ assert(a.findOne({num: 1}), "pre move 1");
+ s.printCollectionInfo("test.foo");
+ myto = s.getOther(s.getPrimaryShard("test")).name;
+ print("counts before move: " + tojson(s.shardCounts("foo")));
+ s.adminCommand({movechunk: "test.foo", find: {num: 1}, to: myto, _waitForDelete: true});
+ print("counts after move: " + tojson(s.shardCounts("foo")));
+ s.printCollectionInfo("test.foo");
+ assert.eq(1, s.onNumShards("foo"), "on 1 shard again");
+ assert(a.findOne({num: 1}), "post move 1");
+ assert(b.findOne({num: 1}), "post move 2");
+
+ print("*** drop");
+
+ s.printCollectionInfo("test.foo", "before drop");
+ a.drop();
+ s.printCollectionInfo("test.foo", "after drop");
+
+ assert.eq(0, a.count(), "a count after drop");
+ assert.eq(0, b.count(), "b count after drop");
+
+ s.printCollectionInfo("test.foo", "after counts");
+
+ assert.eq(0, primary.count(), "p count after drop");
+ assert.eq(0, secondary.count(), "s count after drop");
+
+ print("*** dropDatabase setup");
+
+ s.printShardingStatus();
+ s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
+ a.save({num: 2});
+ a.save({num: 3});
+ s.adminCommand({split: "test.foo", middle: {num: 2}});
+ s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+ });
+ s.printShardingStatus();
+
+ s.printCollectionInfo("test.foo", "after dropDatabase setup");
+ doCounts("after dropDatabase setup2");
+ s.printCollectionInfo("test.foo", "after dropDatabase setup3");
+
+ print("*** ready to call dropDatabase");
+ res = s.getDB("test").dropDatabase();
+ assert.eq(1, res.ok, "dropDatabase failed : " + tojson(res));
+ // Waiting for SERVER-2253
+ assert.eq(0,
+ s.config.databases.count({_id: "test"}),
+ "database 'test' was dropped but still appears in configDB");
+
+ s.printShardingStatus();
+ s.printCollectionInfo("test.foo", "after dropDatabase call 1");
+ assert.eq(0, doCounts("after dropDatabase called"));
+
+ // ---- retry commands SERVER-1471 ----
+
+ s.adminCommand({enablesharding: "test2"});
+ s.ensurePrimaryShard('test2', 'shard0000');
+ s.adminCommand({shardcollection: "test2.foo", key: {num: 1}});
+ dba = s.getDB("test2");
+ dbb = s2.getDB("test2");
+ dba.foo.save({num: 1});
+ dba.foo.save({num: 2});
+ dba.foo.save({num: 3});
+
+ assert.eq(1, s.onNumShards("foo", "test2"), "B on 1 shards");
+ assert.eq(3, dba.foo.count(), "Ba");
+ assert.eq(3, dbb.foo.count(), "Bb");
+
+ s.adminCommand({split: "test2.foo", middle: {num: 2}});
+ s.adminCommand({
+ movechunk: "test2.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test2")).name,
+ _waitForDelete: true
+ });
+
+ assert.eq(2, s.onNumShards("foo", "test2"), "B on 2 shards");
+
+ x = dba.foo.stats();
+ printjson(x);
+ y = dbb.foo.stats();
+ printjson(y);
+
+ s.stop();
})();
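
shard3.js drives the balancer through the sh helpers; the pattern is worth noting on its own, assuming the shell is connected to a mongos:

    sh.setBalancerState(false);                 // pause chunk migrations
    assert(!sh.getBalancerState(), 'balancer should be stopped');
    sh.setBalancerState(true);                  // resume them
    assert(sh.getBalancerState(), 'balancer should be running');
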
diff --git a/jstests/sharding/shard4.js b/jstests/sharding/shard4.js
index bf91b816607..76b9394cb19 100644
--- a/jstests/sharding/shard4.js
+++ b/jstests/sharding/shard4.js
@@ -4,53 +4,57 @@ s = new ShardingTest({name: "shard4", shards: 2, mongos: 2});
s2 = s._mongos[1];
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
if (s.configRS) {
// Ensure that the second mongos will see the movePrimary
s.configRS.awaitLastOpCommitted();
}
-s.getDB( "test" ).foo.save( { num : 1 } );
-s.getDB( "test" ).foo.save( { num : 2 } );
-s.getDB( "test" ).foo.save( { num : 3 } );
-s.getDB( "test" ).foo.save( { num : 4 } );
-s.getDB( "test" ).foo.save( { num : 5 } );
-s.getDB( "test" ).foo.save( { num : 6 } );
-s.getDB( "test" ).foo.save( { num : 7 } );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-
-assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
-assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
-assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
- s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.getDB("test").foo.save({num: 1});
+s.getDB("test").foo.save({num: 2});
+s.getDB("test").foo.save({num: 3});
+s.getDB("test").foo.save({num: 4});
+s.getDB("test").foo.save({num: 5});
+s.getDB("test").foo.save({num: 6});
+s.getDB("test").foo.save({num: 7});
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal A");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other A");
+
+s.adminCommand({split: "test.foo", middle: {num: 4}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert(s._connections[0].getDB("test").foo.find().toArray().length > 0, "blah 1");
+assert(s._connections[1].getDB("test").foo.find().toArray().length > 0, "blah 2");
+assert.eq(7,
+ s._connections[0].getDB("test").foo.find().toArray().length +
+ s._connections[1].getDB("test").foo.find().toArray().length,
+ "blah 3");
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B");
+
+s.adminCommand({split: "test.foo", middle: {num: 2}});
s.printChunks();
-print( "* A" );
+print("* A");
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 2" );
-print( "* B" );
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 3" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B 4" );
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B 1");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B 2");
+print("* B");
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B 3");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B 4");
-for ( var i=0; i<10; i++ ){
- print( "* C " + i );
- assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B " + i );
+for (var i = 0; i < 10; i++) {
+ print("* C " + i);
+ assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B " + i);
}
s.stop();
diff --git a/jstests/sharding/shard5.js b/jstests/sharding/shard5.js
index c88cd355d73..c4f05d610cd 100644
--- a/jstests/sharding/shard5.js
+++ b/jstests/sharding/shard5.js
@@ -2,59 +2,61 @@
// tests write passthrough
-s = new ShardingTest({name: "shard5", shards: 2, mongos:2});
+s = new ShardingTest({name: "shard5", shards: 2, mongos: 2});
s.stopBalancer();
s2 = s._mongos[1];
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { num : 1 } } );
+s.adminCommand({shardcollection: "test.foo", key: {num: 1}});
if (s.configRS) {
// Ensure that the second mongos will see the movePrimary
s.configRS.awaitLastOpCommitted();
}
-s.getDB( "test" ).foo.save( { num : 1 } );
-s.getDB( "test" ).foo.save( { num : 2 } );
-s.getDB( "test" ).foo.save( { num : 3 } );
-s.getDB( "test" ).foo.save( { num : 4 } );
-s.getDB( "test" ).foo.save( { num : 5 } );
-s.getDB( "test" ).foo.save( { num : 6 } );
-s.getDB( "test" ).foo.save( { num : 7 } );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal A" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other A" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 4 } } );
-s.adminCommand( { movechunk : "test.foo",
- find : { num : 3 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- _waitForDelete : true } );
-
-assert( s._connections[0].getDB( "test" ).foo.find().toArray().length > 0 , "blah 1" );
-assert( s._connections[1].getDB( "test" ).foo.find().toArray().length > 0 , "blah 2" );
-assert.eq( 7 , s._connections[0].getDB( "test" ).foo.find().toArray().length +
- s._connections[1].getDB( "test" ).foo.find().toArray().length , "blah 3" );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B" );
-assert.eq( 7 , s2.getDB( "test" ).foo.find().toArray().length , "other B" );
-
-s.adminCommand( { split : "test.foo" , middle : { num : 2 } } );
+s.getDB("test").foo.save({num: 1});
+s.getDB("test").foo.save({num: 2});
+s.getDB("test").foo.save({num: 3});
+s.getDB("test").foo.save({num: 4});
+s.getDB("test").foo.save({num: 5});
+s.getDB("test").foo.save({num: 6});
+s.getDB("test").foo.save({num: 7});
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal A");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other A");
+
+s.adminCommand({split: "test.foo", middle: {num: 4}});
+s.adminCommand({
+ movechunk: "test.foo",
+ find: {num: 3},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ _waitForDelete: true
+});
+
+assert(s._connections[0].getDB("test").foo.find().toArray().length > 0, "blah 1");
+assert(s._connections[1].getDB("test").foo.find().toArray().length > 0, "blah 2");
+assert.eq(7,
+ s._connections[0].getDB("test").foo.find().toArray().length +
+ s._connections[1].getDB("test").foo.find().toArray().length,
+ "blah 3");
+
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B");
+assert.eq(7, s2.getDB("test").foo.find().toArray().length, "other B");
+
+s.adminCommand({split: "test.foo", middle: {num: 2}});
s.printChunks();
-print( "* A" );
-
-assert.eq( 7 , s.getDB( "test" ).foo.find().toArray().length , "normal B 1" );
+print("* A");
-s2.getDB( "test" ).foo.save( { num : 2 } );
+assert.eq(7, s.getDB("test").foo.find().toArray().length, "normal B 1");
-assert.soon(
- function(){
- return 8 == s2.getDB( "test" ).foo.find().toArray().length;
- } , "other B 2" , 5000 , 100 );
+s2.getDB("test").foo.save({num: 2});
-assert.eq( 2 , s.onNumShards( "foo" ) , "on 2 shards" );
+assert.soon(function() {
+ return 8 == s2.getDB("test").foo.find().toArray().length;
+}, "other B 2", 5000, 100);
+assert.eq(2, s.onNumShards("foo"), "on 2 shards");
s.stop();
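
The second mongos (`s2`) in this test can lag behind on routing metadata, since movePrimary and the chunk split are recorded on the config servers and each mongos refreshes independently. The test copes with that in two ways, shown here in condensed form (grouped only for illustration; both calls appear in the code above):

    if (s.configRS) {
        // Block until the config replica set has committed the metadata change,
        // so the second mongos can observe it on its next refresh.
        s.configRS.awaitLastOpCommitted();
    }
    // The second mongos may briefly route with a stale chunk map after the split,
    // so the test polls rather than asserting immediately.
    assert.soon(function() {
        return 8 == s2.getDB("test").foo.find().toArray().length;
    }, "second mongos did not observe the write", 5000, 100);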
diff --git a/jstests/sharding/shard6.js b/jstests/sharding/shard6.js
index abc0b5adb31..2e0643189d1 100644
--- a/jstests/sharding/shard6.js
+++ b/jstests/sharding/shard6.js
@@ -4,118 +4,122 @@ summary = "";
s = new ShardingTest({name: "shard6", shards: 2});
-s.config.settings.update( { _id: "balancer" }, { $set : { stopped: true } } , true );
+s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
-s.adminCommand( { enablesharding : "test" } );
+s.adminCommand({enablesharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+s.adminCommand({shardcollection: "test.data", key: {num: 1}});
-db = s.getDB( "test" );
+db = s.getDB("test");
-function poolStats( where ){
+function poolStats(where) {
var total = 0;
var msg = "poolStats " + where + " ";
- var x = db.runCommand( "connPoolStats" ).hosts;
- for ( var h in x ){
+ var x = db.runCommand("connPoolStats").hosts;
+ for (var h in x) {
var z = x[h];
msg += z.created + " ";
total += z.created;
}
- printjson( x );
- print( "****\n" + msg + "\n*****" );
+ printjson(x);
+ print("****\n" + msg + "\n*****");
summary += msg + "\n";
return [total, x.length];
}
-poolStats( "at start" );
+poolStats("at start");
// we want a lot of data, so let's make a 50k string to cheat :)
bigString = "";
-while ( bigString.length < 50000 )
+while (bigString.length < 50000)
bigString += "this is a big string. ";
// ok, now let's insert some data
var num = 0;
-for ( ; num<100; num++ ){
- db.data.save( { num : num , bigString : bigString } );
+for (; num < 100; num++) {
+ db.data.save({num: num, bigString: bigString});
}
-assert.eq( 100 , db.data.find().toArray().length , "basic find after setup" );
+assert.eq(100, db.data.find().toArray().length, "basic find after setup");
-connBefore = poolStats( "setup done" );
+connBefore = poolStats("setup done");
// limit
-assert.eq( 77 , db.data.find().limit(77).itcount() , "limit test 1" );
-assert.eq( 1 , db.data.find().limit(1).itcount() , "limit test 2" );
-for ( var i=1; i<10; i++ ){
- assert.eq( i , db.data.find().limit(i).itcount() , "limit test 3a : " + i );
- assert.eq( i , db.data.find().skip(i).limit(i).itcount() , "limit test 3b : " + i );
- poolStats( "after loop : " + i );
+assert.eq(77, db.data.find().limit(77).itcount(), "limit test 1");
+assert.eq(1, db.data.find().limit(1).itcount(), "limit test 2");
+for (var i = 1; i < 10; i++) {
+ assert.eq(i, db.data.find().limit(i).itcount(), "limit test 3a : " + i);
+ assert.eq(i, db.data.find().skip(i).limit(i).itcount(), "limit test 3b : " + i);
+ poolStats("after loop : " + i);
}
// We do not want the number of connections from mongos to mongod to increase,
// but it may have increased because of the background replica set monitor, and that case is ok.
// This is due to SERVER-22564.
-limitTestAfterConns = poolStats( "limit test done" );
+limitTestAfterConns = poolStats("limit test done");
// Only check that the number of connections is the same if the number of hosts we are connected to
// remains the same. TODO: remove the host count check after SERVER-22564 is fixed.
-if( limitTestAfterConns[1] == connBefore[1]) {
- assert.eq( connBefore[0] , limitTestAfterConns[0], "limit test conns" );
+if (limitTestAfterConns[1] == connBefore[1]) {
+ assert.eq(connBefore[0], limitTestAfterConns[0], "limit test conns");
}
-function assertOrder( start , num ){
- var a = db.data.find().skip(start).limit(num).sort( { num : 1 } ).map( function(z){ return z.num; } );
+function assertOrder(start, num) {
+ var a = db.data.find().skip(start).limit(num).sort({num: 1}).map(function(z) {
+ return z.num;
+ });
var c = [];
- for ( var i=0; i<num; i++ )
- c.push( start + i );
- assert.eq( c , a , "assertOrder start: " + start + " num: " + num );
+ for (var i = 0; i < num; i++)
+ c.push(start + i);
+ assert.eq(c, a, "assertOrder start: " + start + " num: " + num);
}
-assertOrder( 0 , 10 );
-assertOrder( 5 , 10 );
+assertOrder(0, 10);
+assertOrder(5, 10);
-poolStats( "after checking order" );
+poolStats("after checking order");
-function doItCount( skip , sort , batchSize ){
+function doItCount(skip, sort, batchSize) {
var c = db.data.find();
- if ( skip )
- c.skip( skip );
- if ( sort )
- c.sort( sort );
- if ( batchSize )
- c.batchSize( batchSize );
+ if (skip)
+ c.skip(skip);
+ if (sort)
+ c.sort(sort);
+ if (batchSize)
+ c.batchSize(batchSize);
return c.itcount();
}
-function checkItCount( batchSize ){
- assert.eq( 5 , doItCount( num - 5 , null , batchSize ) , "skip 1 " + batchSize );
- assert.eq( 5 , doItCount( num - 5 , { num : 1 } , batchSize ) , "skip 2 " + batchSize );
- assert.eq( 5 , doItCount( num - 5 , { _id : 1 } , batchSize ) , "skip 3 " + batchSize );
- assert.eq( 0 , doItCount( num + 5 , { num : 1 } , batchSize ) , "skip 4 " + batchSize );
- assert.eq( 0 , doItCount( num + 5 , { _id : 1 } , batchSize ) , "skip 5 " + batchSize );
+function checkItCount(batchSize) {
+ assert.eq(5, doItCount(num - 5, null, batchSize), "skip 1 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {num: 1}, batchSize), "skip 2 " + batchSize);
+ assert.eq(5, doItCount(num - 5, {_id: 1}, batchSize), "skip 3 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {num: 1}, batchSize), "skip 4 " + batchSize);
+ assert.eq(0, doItCount(num + 5, {_id: 1}, batchSize), "skip 5 " + batchSize);
}
-poolStats( "before checking itcount" );
+poolStats("before checking itcount");
-checkItCount( 0 );
-checkItCount( 2 );
+checkItCount(0);
+checkItCount(2);
-poolStats( "after checking itcount" );
+poolStats("after checking itcount");
// --- test save support ---
o = db.data.findOne();
o.x = 16;
-db.data.save( o );
-o = db.data.findOne( { _id : o._id } );
-assert.eq( 16 , o.x , "x1 - did save fail? " + tojson(o) );
+db.data.save(o);
+o = db.data.findOne({_id: o._id});
+assert.eq(16, o.x, "x1 - did save fail? " + tojson(o));
-poolStats( "at end" );
+poolStats("at end");
-print( summary );
+print(summary);
-assert.throws( function(){ s.adminCommand( { enablesharding : "admin" } ); } );
+assert.throws(function() {
+ s.adminCommand({enablesharding: "admin"});
+});
s.stop();
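
The skip/limit assertions above hold no matter how the 100 documents are distributed because, for a sharded query, mongos generally forwards limit-plus-skip to each shard and performs the actual skip while merging the per-shard results. A tiny illustration against the same `db.data` collection (the concrete numbers are only an example):

    // Each shard may be asked for up to skip + limit = 15 documents;
    // mongos then discards the first 5 during the merge, yielding 10.
    assert.eq(10, db.data.find().skip(5).limit(10).itcount());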
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js
index e371ba0ffb7..ce0ce708784 100644
--- a/jstests/sharding/shard7.js
+++ b/jstests/sharding/shard7.js
@@ -3,49 +3,52 @@
s = new ShardingTest({name: 'shard7', shards: 2});
-db = s.admin._mongo.getDB( 'test' );
-c = db[ 'foo' ];
+db = s.admin._mongo.getDB('test');
+c = db['foo'];
c.drop();
-s.adminCommand( { enablesharding: '' + db } );
+s.adminCommand({enablesharding: '' + db});
s.ensurePrimaryShard(db.getName(), 'shard0001');
-s.adminCommand( { shardcollection: '' + c, key: { a:1,b:1 } } );
+s.adminCommand({shardcollection: '' + c, key: {a: 1, b: 1}});
// Check query operation with some satisfiable and unsatisfiable queries.
-assert.eq( 0, c.find({a:1}).itcount() );
-assert.eq( 0, c.find({a:1,b:1}).itcount() );
-assert.eq( 0, c.find({a:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({a:1,b:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({a:{$gt:0,$lt:2},b:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({b:{$gt:4,$lt:2}}).itcount() );
-assert.eq( 0, c.find({a:{$in:[]}}).itcount() );
-assert.eq( 0, c.find({a:1,b:{$in:[]}}).itcount() );
+assert.eq(0, c.find({a: 1}).itcount());
+assert.eq(0, c.find({a: 1, b: 1}).itcount());
+assert.eq(0, c.find({a: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({a: 1, b: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({a: {$gt: 0, $lt: 2}, b: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({b: {$gt: 4, $lt: 2}}).itcount());
+assert.eq(0, c.find({a: {$in: []}}).itcount());
+assert.eq(0, c.find({a: 1, b: {$in: []}}).itcount());
-assert.eq( 0, c.find({$or:[{a:{$gt:0,$lt:10}},{a:12}]}).itcount() );
-assert.eq( 0, c.find({$or:[{a:{$gt:0,$lt:10}},{a:5}]}).itcount() );
-assert.eq( 0, c.find({$or:[{a:1,b:{$gt:0,$lt:10}},{a:1,b:5}]}).itcount() );
+assert.eq(0, c.find({$or: [{a: {$gt: 0, $lt: 10}}, {a: 12}]}).itcount());
+assert.eq(0, c.find({$or: [{a: {$gt: 0, $lt: 10}}, {a: 5}]}).itcount());
+assert.eq(0, c.find({$or: [{a: 1, b: {$gt: 0, $lt: 10}}, {a: 1, b: 5}]}).itcount());
// Check other operations that use getShardsForQuery.
-unsatisfiable = {a:1,b:{$gt:4,$lt:2}};
-
-assert.eq( 0, c.count(unsatisfiable) );
-assert.eq( [], c.distinct('a',unsatisfiable) );
-
-aggregate = c.aggregate( { $match:unsatisfiable } );
-assert.eq( 0, aggregate.toArray().length );
-
-c.save( {a:null,b:null} );
-c.save( {a:1,b:1} );
-assert.writeOK( c.remove( unsatisfiable ));
-assert.eq( 2, c.count() );
-assert.writeOK( c.update( unsatisfiable, {$set:{c:1}}, false, true ));
-assert.eq( 2, c.count() );
-assert.eq( 0, c.count( {c:1} ) );
-
-c.ensureIndex( {loc:'2d'} );
-c.save( {a:2,b:2,loc:[0,0]} );
-near = db.runCommand( {geoNear:'foo', near:[0,0], query:unsatisfiable} );
-assert.commandWorked( near );
-assert.eq( 0, near.results.length );
+unsatisfiable = {
+ a: 1,
+ b: {$gt: 4, $lt: 2}
+};
+
+assert.eq(0, c.count(unsatisfiable));
+assert.eq([], c.distinct('a', unsatisfiable));
+
+aggregate = c.aggregate({$match: unsatisfiable});
+assert.eq(0, aggregate.toArray().length);
+
+c.save({a: null, b: null});
+c.save({a: 1, b: 1});
+assert.writeOK(c.remove(unsatisfiable));
+assert.eq(2, c.count());
+assert.writeOK(c.update(unsatisfiable, {$set: {c: 1}}, false, true));
+assert.eq(2, c.count());
+assert.eq(0, c.count({c: 1}));
+
+c.ensureIndex({loc: '2d'});
+c.save({a: 2, b: 2, loc: [0, 0]});
+near = db.runCommand({geoNear: 'foo', near: [0, 0], query: unsatisfiable});
+assert.commandWorked(near);
+assert.eq(0, near.results.length);
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js
index d83ae01a39c..769c2fc8163 100644
--- a/jstests/sharding/shard_collection_basic.js
+++ b/jstests/sharding/shard_collection_basic.js
@@ -3,179 +3,153 @@
//
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({mongos:1, shards:2});
-var kDbName = 'db';
-var mongos = st.s0;
+ var st = new ShardingTest({mongos: 1, shards: 2});
+ var kDbName = 'db';
+ var mongos = st.s0;
-function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- var ns = kDbName + '.foo';
- assert.commandFailed(mongos.adminCommand({
- shardCollection: ns,
- key: keyDoc
- }));
+ var ns = kDbName + '.foo';
+ assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- mongos.getDB(kDbName).dropDatabase();
-}
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ mongos.getDB(kDbName).dropDatabase();
+ }
-function testAndClenaupWithKeyOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
- assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
+ function testAndClenaupWithKeyOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- assert.commandWorked(mongos.adminCommand({
- shardCollection: ns,
- key: keyDoc
- }));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- mongos.getDB(kDbName).dropDatabase();
-}
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ mongos.getDB(kDbName).dropDatabase();
+ }
-function testAndClenaupWithKeyNoIndexOK(keyDoc) {
- assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ function testAndClenaupWithKeyNoIndexOK(keyDoc) {
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
- var ns = kDbName + '.foo';
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
+ var ns = kDbName + '.foo';
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
- assert.commandWorked(mongos.adminCommand({
- shardCollection: ns,
- key: keyDoc
- }));
+ assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
- assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
- mongos.getDB(kDbName).dropDatabase();
-}
+ assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
+ mongos.getDB(kDbName).dropDatabase();
+ }
-// Fail if db is not sharded.
-assert.commandFailed(mongos.adminCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ // Fail if db is not sharded.
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-// Fail if db is not sharding enabled.
-assert.commandFailed(mongos.adminCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ // Fail if sharding is not enabled on the db.
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-// Verify wrong arguments errors.
-assert.commandFailed(mongos.adminCommand({ shardCollection: 'foo', key: {_id:1} }));
+ // Verify that wrong arguments produce errors.
+ assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: {_id: 1}}));
-assert.commandFailed(
- mongos.adminCommand({ shardCollection: 'foo', key: "aaa" })
-);
+ assert.commandFailed(mongos.adminCommand({shardCollection: 'foo', key: "aaa"}));
-// shardCollection may only be run against admin database.
-assert.commandFailed(
- mongos.getDB('test').runCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ // shardCollection may only be run against the admin database.
+ assert.commandFailed(
+ mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-// Can't shard if key is not specified.
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo' }));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ // Can't shard if key is not specified.
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo'}));
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {}
-}));
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {}}));
-// Verify key format
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {aKey: "hahahashed"}
-}));
+ // Verify key format
+ assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: "hahahashed"}}));
-// Error if a collection is already sharded.
-assert.commandWorked(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {_id:1}
-}));
+ // Error if a collection is already sharded.
+ assert.commandWorked(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-assert.commandFailed(mongos.adminCommand({ shardCollection: kDbName + '.foo', key: {_id:1} }));
+ assert.commandFailed(mongos.adminCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
-mongos.getDB(kDbName).dropDatabase();
+ mongos.getDB(kDbName).dropDatabase();
-// Shard empty collections no index required.
-testAndClenaupWithKeyNoIndexOK({_id:1});
-testAndClenaupWithKeyNoIndexOK({_id:'hashed'});
+ // Shard empty collections; no index required.
+ testAndClenaupWithKeyNoIndexOK({_id: 1});
+ testAndClenaupWithKeyNoIndexOK({_id: 'hashed'});
-// Shard by a plain key.
-testAndClenaupWithKeyNoIndexOK({a:1});
+ // Shard by a plain key.
+ testAndClenaupWithKeyNoIndexOK({a: 1});
-// Cant shard collection with data and no index on the shard key.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyNoIndexFailed({a:1});
+ // Can't shard collection with data and no index on the shard key.
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyOK({a:1});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyOK({a: 1});
-// Shard by a hashed key.
-testAndClenaupWithKeyNoIndexOK({a:'hashed'});
+ // Shard by a hashed key.
+ testAndClenaupWithKeyNoIndexOK({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyNoIndexFailed({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyOK({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyOK({a: 'hashed'});
-// Shard by a compound key.
-testAndClenaupWithKeyNoIndexOK({x:1, y:1});
+ // Shard by a compound key.
+ testAndClenaupWithKeyNoIndexOK({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
-testAndClenaupWithKeyNoIndexFailed({x:1, y:1});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+ testAndClenaupWithKeyNoIndexFailed({x: 1, y: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
-testAndClenaupWithKeyOK({x:1, y:1});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({x: 1, y: 1}));
+ testAndClenaupWithKeyOK({x: 1, y: 1});
-testAndClenaupWithKeyNoIndexFailed({x:'hashed', y:1});
-testAndClenaupWithKeyNoIndexFailed({x:'hashed', y:'hashed'});
+ testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 1});
+ testAndClenaupWithKeyNoIndexFailed({x: 'hashed', y: 'hashed'});
-// Shard by a key component.
-testAndClenaupWithKeyOK({'z.x':1});
-testAndClenaupWithKeyOK({'z.x':'hashed'});
+ // Shard by a key component.
+ testAndClenaupWithKeyOK({'z.x': 1});
+ testAndClenaupWithKeyOK({'z.x': 'hashed'});
-// Can't shard by a multikey.
-assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a:1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1,2,3,4,5], b:1}));
-testAndClenaupWithKeyNoIndexFailed({a:1});
+ // Can't shard by a multikey.
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1}));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1});
-assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a:1, b:1}));
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1,2,3,4,5], b:1}));
-testAndClenaupWithKeyNoIndexFailed({a:1, b:1});
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({a: 1, b: 1}));
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyNoIndexFailed({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyNoIndexFailed({a: 'hashed'});
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
-testAndClenaupWithKeyOK({a:'hashed'});
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: 1, b: 1}));
+ testAndClenaupWithKeyOK({a: 'hashed'});
-// Cant shard by a parallel arrays.
-assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1,2,3,4,5], b: [1,2,3,4,5]}));
-testAndClenaupWithKeyNoIndexFailed({a:1, b:1});
+ // Can't shard by parallel arrays.
+ assert.writeOK(mongos.getDB(kDbName).foo.insert({a: [1, 2, 3, 4, 5], b: [1, 2, 3, 4, 5]}));
+ testAndClenaupWithKeyNoIndexFailed({a: 1, b: 1});
-assert.commandWorked(mongos.adminCommand({enableSharding : kDbName}));
+ assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
-// Can't shard on unique hashed key.
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {aKey:"hashed"},
- unique: true
-}));
+ // Can't shard on unique hashed key.
+ assert.commandFailed(mongos.adminCommand(
+ {shardCollection: kDbName + '.foo', key: {aKey: "hashed"}, unique: true}));
-// If shardCollection has unique:true it must have a unique index.
-assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey:1}));
+ // If shardCollection has unique:true, it must have a unique index.
+ assert.commandWorked(mongos.getDB(kDbName).foo.createIndex({aKey: 1}));
-assert.commandFailed(mongos.adminCommand({
- shardCollection: kDbName + '.foo',
- key: {aKey:1},
- unique: true
-}));
+ assert.commandFailed(
+ mongos.adminCommand({shardCollection: kDbName + '.foo', key: {aKey: 1}, unique: true}));
-st.stop();
+ st.stop();
})();
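
Taken together, the cases above pin down the preconditions of shardCollection: sharding must be enabled on the database, the key must be a valid range or hashed key pattern, a non-empty collection needs a usable index on the key (multikey or parallel-array indexes do not qualify), and {unique: true} cannot be combined with a hashed key. A condensed happy-path sketch, with illustrative database and collection names that are not part of the test:

    // Assumed: `mongos` is a connection to a mongos, as in the test above.
    var demoDb = 'demo';
    assert.commandWorked(mongos.adminCommand({enableSharding: demoDb}));
    // An index backing the shard key lets a non-empty collection be sharded.
    assert.commandWorked(mongos.getDB(demoDb).items.createIndex({region: 1, ts: 1}));
    assert.commandWorked(
        mongos.adminCommand({shardCollection: demoDb + '.items', key: {region: 1, ts: 1}}));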
diff --git a/jstests/sharding/shard_existing.js b/jstests/sharding/shard_existing.js
index 9473ae62a03..a1b328cfe1f 100644
--- a/jstests/sharding/shard_existing.js
+++ b/jstests/sharding/shard_existing.js
@@ -1,42 +1,38 @@
(function() {
-var s = new ShardingTest({ name: "shard_existing",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1 } });
+ var s = new ShardingTest({name: "shard_existing", shards: 2, mongos: 1, other: {chunkSize: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-var stringSize = 10000;
-var numDocs = 2000;
+ var stringSize = 10000;
+ var numDocs = 2000;
+ // we want a lot of data, so let's make a string to cheat :)
+ var bigString = new Array(stringSize).toString();
+ var docSize = Object.bsonsize({_id: numDocs, s: bigString});
+ var totalSize = docSize * numDocs;
+ print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
-// we want a lot of data, so lets make a string to cheat :)
-var bigString = new Array(stringSize).toString();
-var docSize = Object.bsonsize({ _id: numDocs, s: bigString });
-var totalSize = docSize * numDocs;
-print("NumDocs: " + numDocs + " DocSize: " + docSize + " TotalSize: " + totalSize);
+ var bulk = db.data.initializeUnorderedBulkOp();
+ for (i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-var bulk = db.data.initializeUnorderedBulkOp();
-for (i=0; i<numDocs; i++) {
- bulk.insert({_id: i, s: bigString});
-}
-assert.writeOK(bulk.execute());
+ var avgObjSize = db.data.stats().avgObjSize;
+ var dataSize = db.data.stats().size;
+ assert.lte(totalSize, dataSize);
-var avgObjSize = db.data.stats().avgObjSize;
-var dataSize = db.data.stats().size;
-assert.lte(totalSize, dataSize);
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ res = s.adminCommand({shardcollection: "test.data", key: {_id: 1}});
+ printjson(res);
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-res = s.adminCommand( { shardcollection : "test.data" , key : { _id : 1 } } );
-printjson(res);
+ // number of chunks should be approximately equal to the total data size divided by half the chunk size
+ var numChunks = s.config.chunks.find().itcount();
+ var guess = Math.ceil(dataSize / (512 * 1024 + avgObjSize));
+ assert(Math.abs(numChunks - guess) < 2, "not right number of chunks");
-// number of chunks should be approx equal to the total data size / half the chunk size
-var numChunks = s.config.chunks.find().itcount();
-var guess = Math.ceil(dataSize / (512*1024 + avgObjSize));
-assert( Math.abs( numChunks - guess ) < 2, "not right number of chunks" );
-
-s.stop();
+ s.stop();
})();
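
The chunk-count check above is back-of-the-envelope arithmetic: with chunkSize set to 1 MB, auto-splitting during the initial load tends to cut chunks at roughly half the chunk size, so the expected count is about dataSize / (512 KB + avgObjSize). With this test's numbers (~10 KB documents, 2000 of them, so roughly 20 MB of data) that works out to about 38-40 chunks, and the assertion allows the observed count to differ from the guess by at most 1. A rough restatement of the arithmetic (values approximate):

    var approxDocSize = 10 * 1024;                     // close to the bsonsize computed above
    var approxDataSize = 2000 * approxDocSize;         // ~20 MB
    var approxGuess = Math.ceil(approxDataSize / (512 * 1024 + approxDocSize));
    print("expected on the order of " + approxGuess + " chunks");   // ~39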
diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js
index 8df1b9caa8f..09ea5b5ec46 100644
--- a/jstests/sharding/shard_insert_getlasterror_w2.js
+++ b/jstests/sharding/shard_insert_getlasterror_w2.js
@@ -18,11 +18,11 @@
// Spin up a sharded cluster, but do not add the shards
var shardingTestConfig = {
- name : baseName,
- mongos : 1,
- shards : 1,
- rs : { nodes : replNodes },
- other : { manualAddShard : true }
+ name: baseName,
+ mongos: 1,
+ shards: 1,
+ rs: {nodes: replNodes},
+ other: {manualAddShard: true}
};
var shardingTest = new ShardingTest(shardingTestConfig);
@@ -33,7 +33,7 @@
var testDBReplSet1 = replSet1.getPrimary().getDB(testDBName);
var bulk = testDBReplSet1.foo.initializeUnorderedBulkOp();
for (var i = 0; i < numDocs; i++) {
- bulk.insert({ x: i, text: textString });
+ bulk.insert({x: i, text: textString});
}
assert.writeOK(bulk.execute());
@@ -42,44 +42,41 @@
var testDB = mongosConn.getDB(testDBName);
// Add replSet1 as only shard
- mongosConn.adminCommand({ addshard : replSet1.getURL() });
+ mongosConn.adminCommand({addshard: replSet1.getURL()});
// Enable sharding on test db and its collection foo
- assert.commandWorked(mongosConn.getDB('admin').runCommand({ enablesharding : testDBName }));
- testDB[testCollName].ensureIndex({ x : 1 });
- assert.commandWorked(mongosConn.getDB('admin').
- runCommand({ shardcollection : testDBName + '.' + testCollName,
- key : { x : 1 }}));
+ assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName}));
+ testDB[testCollName].ensureIndex({x: 1});
+ assert.commandWorked(mongosConn.getDB('admin').runCommand(
+ {shardcollection: testDBName + '.' + testCollName, key: {x: 1}}));
// Test case where GLE should return an error
- testDB.foo.insert({_id:'a', x:1});
- assert.writeError(testDB.foo.insert({ _id: 'a', x: 1 },
- { writeConcern: { w: 2, wtimeout: 30000 }}));
+ testDB.foo.insert({_id: 'a', x: 1});
+ assert.writeError(testDB.foo.insert({_id: 'a', x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
// Add more data
bulk = testDB.foo.initializeUnorderedBulkOp();
for (var i = numDocs; i < 2 * numDocs; i++) {
- bulk.insert({ x: i, text: textString });
+ bulk.insert({x: i, text: textString});
}
- assert.writeOK(bulk.execute({ w: replNodes, wtimeout: 30000 }));
+ assert.writeOK(bulk.execute({w: replNodes, wtimeout: 30000}));
// Take down two nodes and make sure slaveOk reads still work
replSet1.stop(1);
replSet1.stop(2);
- testDB.getMongo().adminCommand({ setParameter : 1, logLevel : 1 });
+ testDB.getMongo().adminCommand({setParameter: 1, logLevel: 1});
testDB.getMongo().setSlaveOk();
print("trying some queries");
- assert.soon(function() { try {
- testDB.foo.find().next();
- }
- catch(e) {
- print(e);
- return false;
- }
- return true;
- }, "Queries took too long to complete correctly.",
- 2 * 60 * 1000);
-
+ assert.soon(function() {
+ try {
+ testDB.foo.find().next();
+ } catch (e) {
+ print(e);
+ return false;
+ }
+ return true;
+ }, "Queries took too long to complete correctly.", 2 * 60 * 1000);
+
// Shutdown cluster
shardingTest.stop();
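
The write-concern portion of this test reduces to two checks: a duplicate _id insert must surface a write error even when {w: 2} is attached, and a bulk execute with {w: replNodes} must succeed while every node is up. A minimal sketch of the per-operation write-concern form, with an illustrative collection handle `coll` reached through mongos (not a name from the test):

    assert.writeOK(coll.insert({_id: 1, x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));
    // The duplicate key error is reported regardless of the attached write concern.
    assert.writeError(coll.insert({_id: 1, x: 1}, {writeConcern: {w: 2, wtimeout: 30000}}));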
diff --git a/jstests/sharding/shard_key_immutable.js b/jstests/sharding/shard_key_immutable.js
index c05ff17e365..76a648d8811 100644
--- a/jstests/sharding/shard_key_immutable.js
+++ b/jstests/sharding/shard_key_immutable.js
@@ -6,7 +6,8 @@
*
* To enforce this invariant, we have the following mongos rule:
*
- * - Upserts must always contain the full shard key and must only be targeted* to the applicable shard.
+ * - Upserts must always contain the full shard key and must only be targeted* to the applicable
+ * shard.
*
* and the following mongod rules:
*
@@ -15,16 +16,20 @@
* match this value.
* - Updates must not modify shard keys.
*
- * *Updates are targeted by the update query if $op-style, or the update document if replacement-style.
+ * *Updates are targeted by the update query if $op-style, or the update document if
+ * replacement-style.
*
- * NOTE: The above is enough to ensure that shard keys do not change. It is not enough to ensure
- * uniqueness of an upserted document based on the upsert query. This is necessary due to the save()
+ * NOTE: The above is enough to ensure that shard keys do not change. It is not enough to ensure
+ * uniqueness of an upserted document based on the upsert query. This is necessary due to the
+ * save()
* style operation:
- * db.coll.update({ _id : xxx }, { _id : xxx, shard : xxx, key : xxx, other : xxx }, { upsert : true })
+ * db.coll.update({ _id : xxx }, { _id : xxx, shard : xxx, key : xxx, other : xxx }, { upsert : true
+ * })
*
- * TODO: Minimize the impact of this hole by disallowing anything but save-style upserts of this form.
+ * TODO: Minimize the impact of this hole by disallowing anything but save-style upserts of this
+ * form.
* Save-style upserts of this form are not safe (duplicate _ids can be created) but the user is
- * explicitly responsible for this for the _id field.
+ * explicitly responsible for this for the _id field.
*
 * In addition, there is a rule where non-multi updates can only affect 0 or 1 documents.
*
@@ -37,12 +42,12 @@
* - $ op updates have multiUpdate flag set to true.
*/
-var st = new ShardingTest({ shards: 2 });
+var st = new ShardingTest({shards: 2});
-st.adminCommand({ enablesharding: "test" });
+st.adminCommand({enablesharding: "test"});
st.ensurePrimaryShard('test', 'shard0001');
-st.adminCommand({ shardcollection: "test.col0", key: { a: 1, b: 1 }});
-st.adminCommand({ shardcollection: "test.col1", key: { 'x.a': 1 }});
+st.adminCommand({shardcollection: "test.col0", key: {a: 1, b: 1}});
+st.adminCommand({shardcollection: "test.col1", key: {'x.a': 1}});
var db = st.s.getDB('test');
var compoundColl = db.getCollection('col0');
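
Before the individual cases, a condensed sketch of the invariant spelled out in the header comment, using the same compoundColl sharded on {a: 1, b: 1} (the field `note` and the particular values are illustrative, not from the test): updates may touch non-key fields freely, but any update that would change the shard key, and any upsert whose query does not contain the full shard key, is rejected in this version of the server:

    // Rejected: an upsert query must contain the full shard key to be targeted.
    assert.writeError(compoundColl.update({a: 1}, {$set: {note: 1}}, true, true));
    compoundColl.insert({a: 1, b: 1});
    // Allowed: only a non-key field changes.
    assert.writeOK(compoundColl.update({a: 1}, {$set: {note: 1}}, false, true));
    // Rejected: this would change the shard key of the matched document.
    assert.writeError(compoundColl.update({a: 1}, {$set: {b: 2}}, false, true));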
@@ -53,104 +58,104 @@ var dotColl = db.getCollection('col1');
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {a: 1}, false));
var doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
doc = compoundColl.findOne();
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 1, b: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {a: 1, b: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { a: 100, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({}, {a: 100, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { a: 100, b: 100, _id: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {a: 100, b: 100, _id: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({}, {$set: {a: 100, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({}, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
//
// Empty query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { a: 1 }, true));
+assert.writeError(compoundColl.update({}, {a: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({}, { a: 1, b: 1 }, true));
+assert.writeOK(compoundColl.update({}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 1, b: 1}), 'doc not upserted properly: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({}, { a: 1, b: 1, _id: 1 }, true));
+assert.writeOK(compoundColl.update({}, {a: 1, b: 1, _id: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 1, b: 1 }), 'doc not upserted properly: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 1, b: 1}), 'doc not upserted properly: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {a: 1, b: 1, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({}, { $set: { c: 1 }}, true, true));
+assert.writeError(compoundColl.update({}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -159,154 +164,154 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 2}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 100, b: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { a: 100, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {a: 100, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100, b: 100, _id: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {a: 100, b: 100, _id: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {a: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 200 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {$set: {b: 200}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 200 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 200}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {a: 100, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 100, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100}, {$rename: {c: 'a'}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
//
// Partial skey query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 100 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 2 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 2}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { a: 1, b: 1, _id: 1 }, true));
+assert.writeError(compoundColl.update({a: 100}, {a: 1, b: 1, _id: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { b: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { a: 100, b: 1, _id: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {a: 100, b: 1, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $set: { c: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100 }, { $rename: { c: 'a' }}, true, true));
+assert.writeError(compoundColl.update({a: 100}, {$rename: {c: 'a'}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -315,162 +320,162 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {b: 2}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { a: 100, b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {a: 100, b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 1, _id: 1}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {$set: {a: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Inspecting query and update alone is not enough to tell whether a shard key will change.
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {$set: {a: 100, b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 100, b: 100, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({b: 100}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
//
// Not prefix of skey query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { b: 100 }, true));
+assert.writeError(compoundColl.update({b: 100}, {b: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { b: 2 }, true));
+assert.writeError(compoundColl.update({b: 100}, {b: 2}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1 }, true));
+assert.writeError(compoundColl.update({b: 100}, {a: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1 }, true));
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { a: 1, b: 1, _id: 1 }, true));
+assert.writeError(compoundColl.update({b: 100}, {a: 1, b: 1, _id: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { b: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1, b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { a: 1, b: 1, _id: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {a: 1, b: 1, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ b: 100 }, { $set: { c: 1 }}, true, true));
+assert.writeError(compoundColl.update({b: 100}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc upserted: ' + tojson(doc));
@@ -479,211 +484,212 @@ assert(doc == null, 'doc upserted: ' + tojson(doc));
//
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {a: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, c: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 100 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 100}), 'doc did not change: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, _id: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, false));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {b: 100}, false));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {b: 100, c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(
+ compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, _id: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeError(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 2, c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, false, true));
+compoundColl.insert({a: 100, b: 100});
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {c: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'doc did not change: ' + tojson(doc));
//
// Full skey query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { a: 100 }, true));
+assert.writeError(compoundColl.update({a: 100, b: 100}, {a: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, c: 1 }, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, c: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'wrong doc: ' + tojson(doc));
// Cannot modify _id!
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { a: 100, b: 100, _id: 100 }, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {a: 100, b: 100, _id: 100}, true));
doc = compoundColl.findOne();
-assert(friendlyEqual(doc, { _id: 100, a: 100, b: 100 }), 'wrong doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {_id: 100, a: 100, b: 100}), 'wrong doc: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { b: 100 }, true));
+assert.writeError(compoundColl.update({a: 100, b: 100}, {b: 100}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { b: 100, c: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {b: 100, c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, c: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
// Can upsert with new _id
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 100, _id: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 100, _id: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ a: 100, b: 100 }, { $set: { a: 100, b: 2, c: 1 }}, true, true));
+assert.writeError(compoundColl.update({a: 100, b: 100}, {$set: {a: 100, b: 2, c: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ a: 100, b: 100 }, { $set: { c: 1 }}, true, true));
+assert.writeOK(compoundColl.update({a: 100, b: 100}, {$set: {c: 1}}, true, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100, c: 1 }), 'wrong doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100, c: 1}), 'wrong doc: ' + tojson(doc));
//
// _id query update
//
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {a: 1}));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
// Special case for _id. This is to make the save() method work.
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { a: 100, b: 100 }));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeOK(compoundColl.update({_id: 1}, {a: 100, b: 100}));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {a: 1, b: 1}));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { a: 100 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeOK(compoundColl.update({_id: 1}, {$set: {a: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {$set: {b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeOK(compoundColl.update({ _id: 1 }, { $set: { b: 100 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeOK(compoundColl.update({_id: 1}, {$set: {b: 100}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
compoundColl.remove({}, false);
-compoundColl.insert({ _id: 1, a: 100, b: 100 });
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, false, true));
+compoundColl.insert({_id: 1, a: 100, b: 100});
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1, b: 1}}, false, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 100, b: 100 }), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 100, b: 100}), 'doc changed: ' + tojson(doc));
//
// _id query upsert
//
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { a: 1 }, true));
+assert.writeError(compoundColl.update({_id: 1}, {a: 1}, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeOK(compoundColl.update({ _id: 1 }, { a: 1, b: 1 }, true));
+assert.writeOK(compoundColl.update({_id: 1}, {a: 1, b: 1}, true));
doc = compoundColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { a: 1, b: 1 }), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {a: 1, b: 1}), 'bad doc: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1 }}, true, true));
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { b: 1 }}, true, true));
+assert.writeError(compoundColl.update({_id: 1}, {$set: {b: 1}}, true, true));
doc = compoundColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
compoundColl.remove({}, false);
-assert.writeError(compoundColl.update({ _id: 1 }, { $set: { a: 1, b: 1 }}, true, true));
+assert.writeError(compoundColl.update({_id: 1}, {$set: {a: 1, b: 1}}, true, true));
assert.eq(0, compoundColl.count(), 'doc should not be inserted');
//
@@ -691,112 +697,112 @@ assert.eq(0, compoundColl.count(), 'doc should not be inserted');
//
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {x: {a: 100, b: 2}}));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'doc did not change: ' + tojson(doc));
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
+dotColl.insert({x: {a: 100}});
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { x: { 'a.z': 100 }});
+ dotColl.update({'x.a': 100}, {x: {'a.z': 100}});
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
+dotColl.insert({x: {a: 100}});
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a': 100 });
+ dotColl.update({'x.a': 100}, {'x.a': 100});
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
+dotColl.insert({x: {a: 100}});
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a.z': 100 });
+ dotColl.update({'x.a': 100}, {'x.a.z': 100});
});
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {x: 100}));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {x: {b: 100}}));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {x: {a: 100, b: 2}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'doc did not change: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {a: 2}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {b: 100}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 2 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.a': 100, b: 2}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }, b: 2 }), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}, b: 2}), 'doc did not change: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {'a.z': 100}}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {'x.a.z': 100}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: 100}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }}), 'doc changed: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}}), 'doc changed: ' + tojson(doc));
dotColl.remove({}, false);
-dotColl.insert({ x: { a: 100 }});
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 200 }}, false, true));
+dotColl.insert({x: {a: 100}});
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.b': 200}}, false, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 200 }}), 'doc did not change: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 200}}), 'doc did not change: ' + tojson(doc));
//
// Dotted query upsert
//
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { x: { a: 100, b: 2 }}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {x: {a: 100, b: 2}}, true));
doc = dotColl.findOne();
assert(doc != null, 'doc was not upserted: ' + tojson(doc));
@@ -804,80 +810,79 @@ assert(doc != null, 'doc was not upserted: ' + tojson(doc));
// This check currently resides in the client drivers.
dotColl.remove({}, false);
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { x: { 'a.z': 100 }}, true);
+ dotColl.update({'x.a': 100}, {x: {'a.z': 100}}, true);
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a': 100 }, true);
+ dotColl.update({'x.a': 100}, {'x.a': 100}, true);
});
// Dotted field names in the resulting objects should not be allowed.
// This check currently resides in the client drivers.
dotColl.remove({}, false);
assert.throws(function() {
- dotColl.update({ 'x.a': 100 }, { 'x.a.z': 100 }, true);
+ dotColl.update({'x.a': 100}, {'x.a.z': 100}, true);
});
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: 100 }, true));
+assert.writeError(dotColl.update({'x.a': 100}, {x: 100}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { x: { b: 100 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {x: {b: 100}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 100, b: 2 }}}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {x: {a: 100, b: 2}}}, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'bad doc: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { a: 2 }}}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {a: 2}}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { b: 100 }}}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {b: 100}}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 100, b: 3 }}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.a': 100, b: 3}}, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100 }, b: 3 }), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100}, b: 3}), 'bad doc: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a': 2 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {'x.a': 2}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: { 'a.z': 100 }}}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: {'a.z': 100}}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { 'x.a.z': 100 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {'x.a.z': 100}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeError(dotColl.update({ 'x.a': 100 }, { $set: { x: 100 }}, true));
+assert.writeError(dotColl.update({'x.a': 100}, {$set: {x: 100}}, true));
doc = dotColl.findOne();
assert(doc == null, 'doc was upserted: ' + tojson(doc));
dotColl.remove({}, false);
-assert.writeOK(dotColl.update({ 'x.a': 100 }, { $set: { 'x.b': 2 }}, true));
+assert.writeOK(dotColl.update({'x.a': 100}, {$set: {'x.b': 2}}, true));
doc = dotColl.findOne();
delete doc._id;
-assert(friendlyEqual(doc, { x: { a: 100, b: 2 }}), 'bad doc: ' + tojson(doc));
+assert(friendlyEqual(doc, {x: {a: 100, b: 2}}), 'bad doc: ' + tojson(doc));
st.stop();
-
diff --git a/jstests/sharding/shard_keycount.js b/jstests/sharding/shard_keycount.js
index 0524a210918..9a63a2cfdce 100644
--- a/jstests/sharding/shard_keycount.js
+++ b/jstests/sharding/shard_keycount.js
@@ -1,50 +1,46 @@
// Tests splitting a chunk twice
(function() {
-var s = new ShardingTest({ name: "shard_keycount",
- shards: 2,
- mongos: 1,
- other:{ chunkSize: 1 } });
+ var s = new ShardingTest({name: "shard_keycount", shards: 2, mongos: 1, other: {chunkSize: 1}});
-dbName = "test";
-collName = "foo";
-ns = dbName + "." + collName;
-
-db = s.getDB( dbName );
+ dbName = "test";
+ collName = "foo";
+ ns = dbName + "." + collName;
-for(var i = 0; i < 10; i++){
- db.foo.insert({ _id : i });
-}
+ db = s.getDB(dbName);
-// Enable sharding on DB
-s.adminCommand( { enablesharding : dbName } );
-s.ensurePrimaryShard(dbName, 'shard0001');
+ for (var i = 0; i < 10; i++) {
+ db.foo.insert({_id: i});
+ }
-// Enable sharding on collection
-s.adminCommand( { shardcollection : ns, key : { _id : 1 } } );
+ // Enable sharding on DB
+ s.adminCommand({enablesharding: dbName});
+ s.ensurePrimaryShard(dbName, 'shard0001');
+ // Enable sharding on collection
+ s.adminCommand({shardcollection: ns, key: {_id: 1}});
-// Split into two chunks
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split into two chunks
+ s.adminCommand({split: ns, find: {_id: 3}});
-coll = db.getCollection( collName );
+ coll = db.getCollection(collName);
-// Split chunk again
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split chunk again
+ s.adminCommand({split: ns, find: {_id: 3}});
-coll.update({ _id : 3 }, { _id : 3 });
+ coll.update({_id: 3}, {_id: 3});
-// Split chunk again
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split chunk again
+ s.adminCommand({split: ns, find: {_id: 3}});
-coll.update({ _id : 3 }, { _id : 3 });
+ coll.update({_id: 3}, {_id: 3});
-// Split chunk again
-// FAILS since the key count is based on the full index, not the chunk itself
-// i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
-// in chunk with bounds _id : 0 => 5
-s.adminCommand({ split : ns, find : { _id : 3 } });
+ // Split chunk again
+ // FAILS since the key count is based on the full index, not the chunk itself
+ // i.e. Split point calc'd is 5 key offset (10 documents), but only four docs
+ // in chunk with bounds _id : 0 => 5
+ s.adminCommand({split: ns, find: {_id: 3}});
-s.stop();
+ s.stop();
});
diff --git a/jstests/sharding/shard_kill_and_pooling.js b/jstests/sharding/shard_kill_and_pooling.js
index 13ae6e41e2c..6b8397f9e37 100644
--- a/jstests/sharding/shard_kill_and_pooling.js
+++ b/jstests/sharding/shard_kill_and_pooling.js
@@ -8,93 +8,89 @@
// Run through the same test twice, once with a hard -9 kill, once with a regular shutdown
-for ( var test = 0; test < 2; test++ ) {
-
-var killWith = (test == 0 ? 15 : 9);
-
-var st = new ShardingTest({shards : 2, mongos : 1});
-
-// Stop balancer to eliminate weird conn stuff
-st.stopBalancer();
-
-var mongos = st.s0;
-var coll = mongos.getCollection("foo.bar");
-var db = coll.getDB();
-
-//Test is not valid for Win32
-var is32Bits = ( db.serverBuildInfo().bits == 32 );
-if ( is32Bits && _isWindows() ) {
-
- // Win32 doesn't provide the polling interface we need to implement the check tested here
- jsTest.log( "Test is not valid on Win32 platform." );
-
-}
-else {
-
- // Non-Win32 platform
-
- assert.writeOK(coll.insert({ hello: "world" }));
-
- jsTest.log("Creating new connections...");
-
- // Create a bunch of connections to the primary node through mongos.
- // jstest ->(x10)-> mongos ->(x10)-> primary
- var conns = [];
- for ( var i = 0; i < 50; i++) {
- conns.push(new Mongo(mongos.host));
- assert.neq( null, conns[i].getCollection(coll + "").findOne() );
- }
-
- jsTest.log("Returning the connections back to the pool.");
-
- for ( var i = 0; i < conns.length; i++ ) {
- conns[i] = null;
- }
- // Make sure we return connections back to the pool
- gc();
-
- // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
- // something goes wrong.
- var connPoolStats = mongos.getDB("admin").runCommand({ shardConnPoolStats : 1 });
- printjson( connPoolStats );
-
- jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "" ) + "...");
-
- // Flush writes to disk, since sometimes we're killing uncleanly
- assert( mongos.getDB( "admin" ).runCommand({ fsync : 1 }).ok );
-
- MongoRunner.stopMongod( st.shard0, killWith );
-
- jsTest.log("Restart shard...");
-
- st.shard0 = MongoRunner.runMongod({ restart : st.shard0, forceLock : true });
-
- jsTest.log("Waiting for socket timeout time...");
-
- // Need to wait longer than the socket polling time.
- sleep(2 * 5000);
-
- jsTest.log("Run queries using new connections.");
-
- var numErrors = 0;
- for ( var i = 0; i < conns.length; i++) {
- var newConn = new Mongo(mongos.host);
- try {
- assert.neq( null, newConn.getCollection("foo.bar").findOne() );
- } catch (e) {
- printjson(e);
- numErrors++;
+for (var test = 0; test < 2; test++) {
+ var killWith = (test == 0 ? 15 : 9);
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+
+ // Stop balancer to eliminate weird conn stuff
+ st.stopBalancer();
+
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var db = coll.getDB();
+
+ // Test is not valid for Win32
+ var is32Bits = (db.serverBuildInfo().bits == 32);
+ if (is32Bits && _isWindows()) {
+ // Win32 doesn't provide the polling interface we need to implement the check tested here
+ jsTest.log("Test is not valid on Win32 platform.");
+
+ } else {
+ // Non-Win32 platform
+
+ assert.writeOK(coll.insert({hello: "world"}));
+
+ jsTest.log("Creating new connections...");
+
+ // Create a bunch of connections to the primary node through mongos.
+ // jstest ->(x10)-> mongos ->(x10)-> primary
+ var conns = [];
+ for (var i = 0; i < 50; i++) {
+ conns.push(new Mongo(mongos.host));
+ assert.neq(null, conns[i].getCollection(coll + "").findOne());
+ }
+
+ jsTest.log("Returning the connections back to the pool.");
+
+ for (var i = 0; i < conns.length; i++) {
+ conns[i] = null;
}
- }
-
- assert.eq(0, numErrors);
+ // Make sure we return connections back to the pool
+ gc();
+
+ // Don't make test fragile by linking to format of shardConnPoolStats, but this is useful if
+ // something goes wrong.
+ var connPoolStats = mongos.getDB("admin").runCommand({shardConnPoolStats: 1});
+ printjson(connPoolStats);
+
+ jsTest.log("Shutdown shard " + (killWith == 9 ? "uncleanly" : "") + "...");
+
+ // Flush writes to disk, since sometimes we're killing uncleanly
+ assert(mongos.getDB("admin").runCommand({fsync: 1}).ok);
+
+ MongoRunner.stopMongod(st.shard0, killWith);
+
+ jsTest.log("Restart shard...");
+
+ st.shard0 = MongoRunner.runMongod({restart: st.shard0, forceLock: true});
+
+ jsTest.log("Waiting for socket timeout time...");
+
+ // Need to wait longer than the socket polling time.
+ sleep(2 * 5000);
+
+ jsTest.log("Run queries using new connections.");
+
+ var numErrors = 0;
+ for (var i = 0; i < conns.length; i++) {
+ var newConn = new Mongo(mongos.host);
+ try {
+ assert.neq(null, newConn.getCollection("foo.bar").findOne());
+ } catch (e) {
+ printjson(e);
+ numErrors++;
+ }
+ }
+
+ assert.eq(0, numErrors);
-} // End Win32 check
+ } // End Win32 check
-st.stop();
+ st.stop();
-jsTest.log("DONE test " + test);
+ jsTest.log("DONE test " + test);
-} // End test loop
+} // End test loop
jsTest.log("DONE!");
diff --git a/jstests/sharding/shard_targeting.js b/jstests/sharding/shard_targeting.js
index ce8537d6fb4..183adac8f1e 100644
--- a/jstests/sharding/shard_targeting.js
+++ b/jstests/sharding/shard_targeting.js
@@ -5,63 +5,61 @@
// shards if the shard key happens to be one of the fields in the command object.
(function() {
-var s = new ShardingTest({ name: "shard_targeting", shards: 2 });
-s.adminCommand({ enablesharding : "test" });
-s.ensurePrimaryShard('test', 'shard0001');
+ var s = new ShardingTest({name: "shard_targeting", shards: 2});
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
-var db = s.getDB("test");
-var res;
+ var db = s.getDB("test");
+ var res;
-//
-// Target count command
-//
+ //
+ // Target count command
+ //
-// Shard key is the same with command name.
-s.shardColl("foo", {count: 1}, { count: "" });
+ // Shard key is the same with command name.
+ s.shardColl("foo", {count: 1}, {count: ""});
-for (var i=0; i<50; i++) {
- db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
- db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
-}
+ for (var i = 0; i < 50; i++) {
+ db.foo.insert({count: i}); // chunk [MinKey, ""), including numbers
+ db.foo.insert({count: "" + i}); // chunk ["", MaxKey]
+ }
-var theOtherShard = s.getOther( s.getPrimaryShard( "test" ) ).name;
-s.printShardingStatus();
+ var theOtherShard = s.getOther(s.getPrimaryShard("test")).name;
+ s.printShardingStatus();
-// Count documents on both shards
+ // Count documents on both shards
-// "count" commnad with "query" option { }.
-assert.eq(db.foo.count(), 100);
-// Optional "query" option is not given.
-res = db.foo.runCommand("count");
-assert.eq(res.n, 100);
+ // "count" commnad with "query" option { }.
+ assert.eq(db.foo.count(), 100);
+ // Optional "query" option is not given.
+ res = db.foo.runCommand("count");
+ assert.eq(res.n, 100);
-//
-// Target mapreduce command
-//
-db.foo.drop();
-// Shard key is the same with command name.
-s.shardColl("foo", {mapReduce: 1}, { mapReduce: "" });
+ //
+ // Target mapreduce command
+ //
+ db.foo.drop();
+ // Shard key is the same with command name.
+ s.shardColl("foo", {mapReduce: 1}, {mapReduce: ""});
-for (var i=0; i<50; i++) {
- db.foo.insert({mapReduce: i}); // to the chunk including number
- db.foo.insert({mapReduce: "" + i}); // to the chunk including string
-}
+ for (var i = 0; i < 50; i++) {
+ db.foo.insert({mapReduce: i}); // to the chunk including number
+ db.foo.insert({mapReduce: "" + i}); // to the chunk including string
+ }
-s.printShardingStatus();
+ s.printShardingStatus();
-function m() { emit("total", 1); }
-function r(k, v) { return Array.sum(v); }
-res = db.foo.runCommand(
-{
- mapReduce: "foo",
- map: m,
- reduce: r,
- out: { inline: 1 }
-});
+ function m() {
+ emit("total", 1);
+ }
+ function r(k, v) {
+ return Array.sum(v);
+ }
+ res = db.foo.runCommand({mapReduce: "foo", map: m, reduce: r, out: {inline: 1}});
-// Count documents on both shards
-assert.eq(res.results[0].value, 100);
+ // Count documents on both shards
+ assert.eq(res.results[0].value, 100);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/shard_with_special_db_names.js b/jstests/sharding/shard_with_special_db_names.js
index cb1ae66a04c..e218a08609b 100644
--- a/jstests/sharding/shard_with_special_db_names.js
+++ b/jstests/sharding/shard_with_special_db_names.js
@@ -1,30 +1,28 @@
-(function(){
+(function() {
-var s = new ShardingTest({ name: "shard_with_special_db_names",
- shards: 2,
- mongos: 2 });
-var specialDB = "[a-z]+";
-var specialNS = specialDB + ".special";
+ var s = new ShardingTest({name: "shard_with_special_db_names", shards: 2, mongos: 2});
+ var specialDB = "[a-z]+";
+ var specialNS = specialDB + ".special";
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.data" , key : { num : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.data", key: {num: 1}});
-// Test that the database will not complain "cannot have 2 database names that
-// differs on case"
-s.adminCommand( { enablesharding : specialDB } );
-s.ensurePrimaryShard(specialDB, 'shard0000');
-s.adminCommand( { shardcollection : specialNS, key : { num : 1 } } );
+ // Test that the database will not complain "cannot have 2 database names that
+ // differs on case"
+ s.adminCommand({enablesharding: specialDB});
+ s.ensurePrimaryShard(specialDB, 'shard0000');
+ s.adminCommand({shardcollection: specialNS, key: {num: 1}});
-var exists = s.getDB("config").collections.find( { _id: specialNS } ).count();
-assert.eq( exists, 1 );
+ var exists = s.getDB("config").collections.find({_id: specialNS}).count();
+ assert.eq(exists, 1);
-// Test that drop database properly cleans up config
-s.getDB(specialDB).dropDatabase();
+ // Test that drop database properly cleans up config
+ s.getDB(specialDB).dropDatabase();
-var cursor = s.getDB("config").collections.find( { _id: specialNS } );
+ var cursor = s.getDB("config").collections.find({_id: specialNS});
-assert.eq( cursor.count(), 1 );
-assert( cursor.next()["dropped"] );
+ assert.eq(cursor.count(), 1);
+ assert(cursor.next()["dropped"]);
})();
diff --git a/jstests/sharding/sharded_limit_batchsize.js b/jstests/sharding/sharded_limit_batchsize.js
index 57303ed530a..cd636b56cdc 100644
--- a/jstests/sharding/sharded_limit_batchsize.js
+++ b/jstests/sharding/sharded_limit_batchsize.js
@@ -2,118 +2,111 @@
// of limit and batchSize with sort return the correct results, and do not issue
// unnecessary getmores (see SERVER-14299).
(function() {
-'use strict';
-
-/**
- * Test the correctness of queries with sort and batchSize on a sharded cluster,
- * running the queries against collection 'coll'.
- */
-function testBatchSize(coll) {
- //Roll the cursor over the second batch and make sure it's correctly sized
- assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
- assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
-}
-
-/**
- * Test the correctness of queries with sort and limit on a sharded cluster,
- * running the queries against collection 'coll'.
- */
-function testLimit(coll) {
- var cursor = coll.find().sort({x: 1}).limit(3);
- assert.eq(-10, cursor.next()["_id"]);
- assert.eq(-9, cursor.next()["_id"]);
- assert.eq(-8, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
-
- cursor = coll.find().sort({x: 1}).skip(5).limit(2);
- assert.eq(-5, cursor.next()["_id"]);
- assert.eq(-4, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(9).limit(2);
- assert.eq(-1, cursor.next()["_id"]);
- assert.eq(1, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-
- cursor = coll.find().sort({x: 1}).skip(11).limit(2);
- assert.eq(2, cursor.next()["_id"]);
- assert.eq(3, cursor.next()["_id"]);
- assert(!cursor.hasNext());
-}
-
-//
-// Create a two-shard cluster. Have an unsharded collection and a sharded collection.
-//
-
-var st = new ShardingTest({
- shards: 2,
- other: {shardOptions: {setParameter: "enableTestCommands=1"}}
-});
-
-var db = st.s.getDB("test");
-var shardedCol = db.getCollection("sharded_limit_batchsize");
-var unshardedCol = db.getCollection("unsharded_limit_batchsize");
-shardedCol.drop();
-unshardedCol.drop();
-
-// Enable sharding and pre-split the sharded collection.
-assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
-st.ensurePrimaryShard(db.getName(), "shard0000");
-db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
-assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
-assert.commandWorked(db.adminCommand({moveChunk: shardedCol.getFullName(),
- find: {_id: 0},
- to: "shard0001"}));
-
-// Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
-// Write 20 documents which all go to the primary shard in the unsharded collection.
-for (var i=1; i<=10; ++i) {
- // These go to shard 1.
- assert.writeOK(shardedCol.insert({_id: i, x: i}));
-
- // These go to shard 0.
- assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
-
- // These go to shard 0 inside the non-sharded collection.
- assert.writeOK(unshardedCol.insert({_id: i, x: i}));
- assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
-}
-
-//
-// Run tests for batch size. These should issue getmores.
-//
-
-jsTest.log("Running batchSize tests against sharded collection.");
-st.shard0.adminCommand({setParameter: 1, logLevel : 1});
-testBatchSize(shardedCol);
-st.shard0.adminCommand({setParameter: 1, logLevel : 0});
-
-jsTest.log("Running batchSize tests against non-sharded collection.");
-testBatchSize(unshardedCol);
-
-//
-// Run tests for limit. These should *not* issue getmores. We confirm this
-// by enabling the getmore failpoint on the shards.
-//
-
-assert.commandWorked(st.shard0.getDB("test").adminCommand({
- configureFailPoint: "failReceivedGetmore",
- mode: "alwaysOn"
-}));
-
-assert.commandWorked(st.shard1.getDB("test").adminCommand({
- configureFailPoint: "failReceivedGetmore",
- mode: "alwaysOn"
-}));
-
-jsTest.log("Running limit tests against sharded collection.");
-testLimit(shardedCol, st.shard0);
-
-jsTest.log("Running limit tests against non-sharded collection.");
-testLimit(unshardedCol, st.shard0);
-
-st.stop();
+ 'use strict';
+
+ /**
+ * Test the correctness of queries with sort and batchSize on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+ function testBatchSize(coll) {
+ // Roll the cursor over the second batch and make sure it's correctly sized
+ assert.eq(20, coll.find().sort({x: 1}).batchSize(3).itcount());
+ assert.eq(15, coll.find().sort({x: 1}).batchSize(3).skip(5).itcount());
+ }
+
+ /**
+ * Test the correctness of queries with sort and limit on a sharded cluster,
+ * running the queries against collection 'coll'.
+ */
+ function testLimit(coll) {
+ var cursor = coll.find().sort({x: 1}).limit(3);
+ assert.eq(-10, cursor.next()["_id"]);
+ assert.eq(-9, cursor.next()["_id"]);
+ assert.eq(-8, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ assert.eq(13, coll.find().sort({x: 1}).limit(13).itcount());
+
+ cursor = coll.find().sort({x: 1}).skip(5).limit(2);
+ assert.eq(-5, cursor.next()["_id"]);
+ assert.eq(-4, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(9).limit(2);
+ assert.eq(-1, cursor.next()["_id"]);
+ assert.eq(1, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+
+ cursor = coll.find().sort({x: 1}).skip(11).limit(2);
+ assert.eq(2, cursor.next()["_id"]);
+ assert.eq(3, cursor.next()["_id"]);
+ assert(!cursor.hasNext());
+ }
+
+ //
+ // Create a two-shard cluster. Have an unsharded collection and a sharded collection.
+ //
+
+ var st = new ShardingTest(
+ {shards: 2, other: {shardOptions: {setParameter: "enableTestCommands=1"}}});
+
+ var db = st.s.getDB("test");
+ var shardedCol = db.getCollection("sharded_limit_batchsize");
+ var unshardedCol = db.getCollection("unsharded_limit_batchsize");
+ shardedCol.drop();
+ unshardedCol.drop();
+
+ // Enable sharding and pre-split the sharded collection.
+ assert.commandWorked(db.adminCommand({enableSharding: db.getName()}));
+ st.ensurePrimaryShard(db.getName(), "shard0000");
+ db.adminCommand({shardCollection: shardedCol.getFullName(), key: {_id: 1}});
+ assert.commandWorked(db.adminCommand({split: shardedCol.getFullName(), middle: {_id: 0}}));
+ assert.commandWorked(
+ db.adminCommand({moveChunk: shardedCol.getFullName(), find: {_id: 0}, to: "shard0001"}));
+
+ // Write 10 documents to shard 0, and 10 documents to shard 1 inside the sharded collection.
+ // Write 20 documents which all go to the primary shard in the unsharded collection.
+ for (var i = 1; i <= 10; ++i) {
+ // These go to shard 1.
+ assert.writeOK(shardedCol.insert({_id: i, x: i}));
+
+ // These go to shard 0.
+ assert.writeOK(shardedCol.insert({_id: -i, x: -i}));
+
+ // These go to shard 0 inside the non-sharded collection.
+ assert.writeOK(unshardedCol.insert({_id: i, x: i}));
+ assert.writeOK(unshardedCol.insert({_id: -i, x: -i}));
+ }
+
+ //
+ // Run tests for batch size. These should issue getmores.
+ //
+
+ jsTest.log("Running batchSize tests against sharded collection.");
+ st.shard0.adminCommand({setParameter: 1, logLevel: 1});
+ testBatchSize(shardedCol);
+ st.shard0.adminCommand({setParameter: 1, logLevel: 0});
+
+ jsTest.log("Running batchSize tests against non-sharded collection.");
+ testBatchSize(unshardedCol);
+
+ //
+ // Run tests for limit. These should *not* issue getmores. We confirm this
+ // by enabling the getmore failpoint on the shards.
+ //
+
+ assert.commandWorked(st.shard0.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+ assert.commandWorked(st.shard1.getDB("test").adminCommand(
+ {configureFailPoint: "failReceivedGetmore", mode: "alwaysOn"}));
+
+ jsTest.log("Running limit tests against sharded collection.");
+ testLimit(shardedCol, st.shard0);
+
+ jsTest.log("Running limit tests against non-sharded collection.");
+ testLimit(unshardedCol, st.shard0);
+
+ st.stop();
})();
diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js
index 4877cb8f1ca..0a8d8424e35 100644
--- a/jstests/sharding/sharded_profile.js
+++ b/jstests/sharding/sharded_profile.js
@@ -3,33 +3,33 @@
(function() {
-var st = new ShardingTest({ shards: 1, mongos: 2 });
-st.stopBalancer();
+ var st = new ShardingTest({shards: 1, mongos: 2});
+ st.stopBalancer();
-var admin = st.s0.getDB('admin');
-var shards = st.s0.getCollection('config.shards').find().toArray();
-var coll = st.s0.getCollection('foo.bar');
+ var admin = st.s0.getDB('admin');
+ var shards = st.s0.getCollection('config.shards').find().toArray();
+ var coll = st.s0.getCollection('foo.bar');
-assert(admin.runCommand({ enableSharding: coll.getDB() + '' }).ok);
-assert(admin.runCommand({ shardCollection: coll + '', key: { _id: 1 } }).ok);
+ assert(admin.runCommand({enableSharding: coll.getDB() + ''}).ok);
+ assert(admin.runCommand({shardCollection: coll + '', key: {_id: 1}}).ok);
-st.printShardingStatus();
+ st.printShardingStatus();
-jsTest.log('Turning on profiling on ' + st.shard0);
+ jsTest.log('Turning on profiling on ' + st.shard0);
-st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
+ st.shard0.getDB(coll.getDB().toString()).setProfilingLevel(2);
-var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
+ var profileColl = st.shard0.getDB(coll.getDB().toString()).system.profile;
-var inserts = [{ _id: 0 }, { _id: 1 }, { _id: 2 }];
+ var inserts = [{_id: 0}, {_id: 1}, {_id: 2}];
-assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
+ assert.writeOK(st.s1.getCollection(coll.toString()).insert(inserts));
-profileEntry = profileColl.findOne();
-assert.neq(null, profileEntry);
-printjson(profileEntry);
-assert.eq(profileEntry.query.documents, inserts);
+ profileEntry = profileColl.findOne();
+ assert.neq(null, profileEntry);
+ printjson(profileEntry);
+ assert.eq(profileEntry.query.documents, inserts);
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/sharding_balance1.js b/jstests/sharding/sharding_balance1.js
index 57df8648559..2d45e829492 100644
--- a/jstests/sharding/sharding_balance1.js
+++ b/jstests/sharding/sharding_balance1.js
@@ -1,62 +1,71 @@
(function() {
-var s = new ShardingTest({ name: "slow_sharding_balance1",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1, enableBalancer : true } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-s.config.settings.find().forEach( printjson );
-
-db = s.getDB( "test" );
-
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 20 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.lt( 20 , s.config.chunks.count() , "setup2" );
-
-function diff1(){
- var x = s.chunkCounts( "foo" );
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
-
-function sum(){
- var x = s.chunkCounts( "foo" );
- return x.shard0000 + x.shard0001;
-}
-
-assert.lt( 20 , diff1() , "big differential here" );
-print( diff1() );
-
-assert.soon( function(){
- var d = diff1();
- return d < 5;
-// Make sure there's enough time here, since balancing can sleep for 15s or so between balances.
-} , "balance didn't happen" , 1000 * 60 * 5 , 5000 );
-
-var chunkCount = sum();
-s.adminCommand( { removeshard: "shard0000" } );
-
-assert.soon( function(){
- printjson(s.chunkCounts( "foo" ));
- s.config.shards.find().forEach(function(z){printjson(z);});
- return chunkCount == s.config.chunks.count({shard: "shard0001"});
-} , "removeshard didn't happen" , 1000 * 60 * 3 , 5000 );
-
-s.stop();
+ var s = new ShardingTest({
+ name: "slow_sharding_balance1",
+ shards: 2,
+ mongos: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+ });
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ s.config.settings.find().forEach(printjson);
+
+ db = s.getDB("test");
+
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (20 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.lt(20, s.config.chunks.count(), "setup2");
+
+ function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
+
+ function sum() {
+ var x = s.chunkCounts("foo");
+ return x.shard0000 + x.shard0001;
+ }
+
+ assert.lt(20, diff1(), "big differential here");
+ print(diff1());
+
+ assert.soon(
+ function() {
+ var d = diff1();
+ return d < 5;
+ // Make sure there's enough time here, since balancing can sleep for 15s or so between
+ // balances.
+ },
+ "balance didn't happen",
+ 1000 * 60 * 5,
+ 5000);
+
+ var chunkCount = sum();
+ s.adminCommand({removeshard: "shard0000"});
+
+ assert.soon(function() {
+ printjson(s.chunkCounts("foo"));
+ s.config.shards.find().forEach(function(z) {
+ printjson(z);
+ });
+ return chunkCount == s.config.chunks.count({shard: "shard0001"});
+ }, "removeshard didn't happen", 1000 * 60 * 3, 5000);
+
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_balance2.js b/jstests/sharding/sharding_balance2.js
index e7ad317e0f5..37c84ed8ded 100644
--- a/jstests/sharding/sharding_balance2.js
+++ b/jstests/sharding/sharding_balance2.js
@@ -3,74 +3,73 @@
*/
(function() {
-"use strict";
+ "use strict";
-var MaxSizeMB = 1;
+ var MaxSizeMB = 1;
-var s = new ShardingTest({ shards: 2, other: { chunkSize: 1, manualAddShard: true }});
-var db = s.getDB( "test" );
-s.stopBalancer();
+ var s = new ShardingTest({shards: 2, other: {chunkSize: 1, manualAddShard: true}});
+ var db = s.getDB("test");
+ s.stopBalancer();
-var names = s.getConnNames();
-assert.eq(2, names.length);
-s.adminCommand({ addshard: names[0] });
-s.adminCommand({ addshard: names[1], maxSize: MaxSizeMB });
+ var names = s.getConnNames();
+ assert.eq(2, names.length);
+ s.adminCommand({addshard: names[0]});
+ s.adminCommand({addshard: names[1], maxSize: MaxSizeMB});
-s.adminCommand({ enablesharding: "test" });
-var res = db.adminCommand({ movePrimary: 'test', to: names[0] });
-assert(res.ok || res.errmsg == "it is already the primary");
+ s.adminCommand({enablesharding: "test"});
+ var res = db.adminCommand({movePrimary: 'test', to: names[0]});
+ assert(res.ok || res.errmsg == "it is already the primary");
+ var bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-var bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+ var inserted = 0;
+ var num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.gt(s.config.chunks.count(), 10);
-var inserted = 0;
-var num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 40 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.gt(s.config.chunks.count(), 10);
+ var getShardSize = function(conn) {
+ var listDatabases = conn.getDB('admin').runCommand({listDatabases: 1});
+ return listDatabases.totalSize;
+ };
-var getShardSize = function(conn) {
- var listDatabases = conn.getDB('admin').runCommand({ listDatabases: 1 });
- return listDatabases.totalSize;
-};
+ var shardConn = new Mongo(names[1]);
-var shardConn = new Mongo(names[1]);
+ // Make sure that shard doesn't have any documents.
+ assert.eq(0, shardConn.getDB('test').foo.find().itcount());
-// Make sure that shard doesn't have any documents.
-assert.eq(0, shardConn.getDB('test').foo.find().itcount());
+ var maxSizeBytes = MaxSizeMB * 1024 * 1024;
-var maxSizeBytes = MaxSizeMB * 1024 * 1024;
+ // Fill the shard with documents to exceed the max size so the balancer won't move
+ // chunks to this shard.
+ var localColl = shardConn.getDB('local').padding;
+ while (getShardSize(shardConn) < maxSizeBytes) {
+ var localBulk = localColl.initializeUnorderedBulkOp();
-// Fill the shard with documents to exceed the max size so the balancer won't move
-// chunks to this shard.
-var localColl = shardConn.getDB('local').padding;
-while (getShardSize(shardConn) < maxSizeBytes) {
- var localBulk = localColl.initializeUnorderedBulkOp();
+ for (var x = 0; x < 20; x++) {
+ localBulk.insert({x: x, val: bigString});
+ }
+ assert.writeOK(localBulk.execute());
- for (var x = 0; x < 20; x++) {
- localBulk.insert({ x: x, val: bigString });
+ // Force the storage engine to flush files to disk so shardSize will get updated.
+ assert.commandWorked(shardConn.getDB('admin').runCommand({fsync: 1}));
}
- assert.writeOK(localBulk.execute());
-
- // Force the storage engine to flush files to disk so shardSize will get updated.
- assert.commandWorked(shardConn.getDB('admin').runCommand({ fsync: 1 }));
-}
-s.startBalancer();
+ s.startBalancer();
-// Wait until balancer finishes at least one balancing round.
-assert(s.waitForBalancerRound(), "Balancer is not running: it never pinged config.mongos");
+ // Wait until balancer finishes at least one balancing round.
+ assert(s.waitForBalancerRound(), "Balancer is not running: it never pinged config.mongos");
-var chunkCounts = s.chunkCounts('foo', 'test');
-assert.eq(0, chunkCounts.shard0001);
+ var chunkCounts = s.chunkCounts('foo', 'test');
+ assert.eq(0, chunkCounts.shard0001);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_balance3.js b/jstests/sharding/sharding_balance3.js
index 51e5765b19e..876709ace4a 100644
--- a/jstests/sharding/sharding_balance3.js
+++ b/jstests/sharding/sharding_balance3.js
@@ -1,68 +1,69 @@
-// Simple test to make sure things get balanced
+// Simple test to make sure things get balanced
(function() {
-var s = new ShardingTest({ name: "slow_sharding_balance3",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1, enableBalancer : true } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-s.config.settings.find().forEach( printjson );
-
-db = s.getDB( "test" );
-
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-inserted = 0;
-num = 0;
-var bulk = db.foo.initializeUnorderedBulkOp();
-while ( inserted < ( 40 * 1024 * 1024 ) ){
- bulk.insert({ _id: num++, s: bigString });
- inserted += bigString.length;
-}
-assert.writeOK(bulk.execute());
-
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.lt( 20 , s.config.chunks.count() , "setup2" );
-
-function diff1(){
- var x = s.chunkCounts( "foo" );
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
-
-assert.lt( 10 , diff1() );
-
-// Wait for balancer to kick in.
-var initialDiff = diff1();
-assert.soon(function() {
- return diff1() != initialDiff;
- }, "Balancer did not kick in");
-
-print("* A");
-print( "disabling the balancer" );
-s.config.settings.update( { _id : "balancer" }, { $set : { stopped : true } } , true );
-s.config.settings.find().forEach( printjson );
-print("* B");
-
-
-print( diff1() );
-
-var currDiff = diff1();
-var waitTime = 0;
-var startTime = Date.now();
-while ( waitTime < ( 1000 * 60 ) ) {
- // Wait for 60 seconds to ensure balancer did not run
- assert.eq( currDiff, diff1(), "balance with stopped flag should not have happened" );
- sleep( 5000 );
- waitTime = Date.now() - startTime;
-}
-
-s.stop();
+ var s = new ShardingTest({
+ name: "slow_sharding_balance3",
+ shards: 2,
+ mongos: 1,
+ other: {chunkSize: 1, enableBalancer: true}
+ });
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ s.config.settings.find().forEach(printjson);
+
+ db = s.getDB("test");
+
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+ inserted = 0;
+ num = 0;
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (inserted < (40 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString});
+ inserted += bigString.length;
+ }
+ assert.writeOK(bulk.execute());
+
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.lt(20, s.config.chunks.count(), "setup2");
+
+ function diff1() {
+ var x = s.chunkCounts("foo");
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
+
+ assert.lt(10, diff1());
+
+ // Wait for balancer to kick in.
+ var initialDiff = diff1();
+ assert.soon(function() {
+ return diff1() != initialDiff;
+ }, "Balancer did not kick in");
+
+ print("* A");
+ print("disabling the balancer");
+ s.config.settings.update({_id: "balancer"}, {$set: {stopped: true}}, true);
+ s.config.settings.find().forEach(printjson);
+ print("* B");
+
+ print(diff1());
+
+ var currDiff = diff1();
+ var waitTime = 0;
+ var startTime = Date.now();
+ while (waitTime < (1000 * 60)) {
+ // Wait for 60 seconds to ensure balancer did not run
+ assert.eq(currDiff, diff1(), "balance with stopped flag should not have happened");
+ sleep(5000);
+ waitTime = Date.now() - startTime;
+ }
+
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_balance4.js b/jstests/sharding/sharding_balance4.js
index 3a89efce5c7..d78e94d407f 100644
--- a/jstests/sharding/sharding_balance4.js
+++ b/jstests/sharding/sharding_balance4.js
@@ -1,140 +1,140 @@
// Check that doing updates done during a migrate all go to the right place
(function() {
-var s = new ShardingTest({ name: "slow_sharding_balance4",
- shards: 2,
- mongos: 1,
- other: { chunkSize: 1 } });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
-assert.eq( 1 , s.config.chunks.count() , "setup1" );
-
-s.config.settings.find().forEach( printjson );
-
-db = s.getDB( "test" );
-
-bigString = "";
-while ( bigString.length < 10000 )
- bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
-
-N = 3000;
-
-num = 0;
-
-counts = {};
-
-//
-// TODO: Rewrite to make much clearer.
-//
-// The core behavior of this test is to add a bunch of documents to a sharded collection, then
-// incrementally update each document and make sure the counts in the document match our update
-// counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
-// our counts via a query.
-//
-// If during a chunk migration an update is missed, we trigger an assertion and fail.
-//
-
-
-function doUpdate( bulk, includeString, optionalId ){
- var up = { $inc : { x : 1 } };
- if ( includeString )
- up["$set"] = { s : bigString };
- var myid = optionalId == undefined ? Random.randInt( N ) : optionalId;
- bulk.find({ _id : myid }).upsert().update( up );
-
- counts[myid] = ( counts[myid] ? counts[myid] : 0 ) + 1;
- return myid;
-}
-
-Random.setRandomSeed();
-// Initially update all documents from 1 to N, otherwise later checks can fail because no document
-// previously existed
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( i = 0; i < N; i++ ){
- doUpdate( bulk, true, i );
-}
-
-for ( i=0; i<N*9; i++ ){
- doUpdate( bulk, false );
-}
-assert.writeOK(bulk.execute());
-
-for ( var i=0; i<50; i++ ){
- s.printChunks( "test.foo" );
- if ( check( "initial:" + i , true ) )
- break;
- sleep( 5000 );
-}
-check( "initial at end" );
-
-
-assert.lt( 20 , s.config.chunks.count() , "setup2" );
-
-function check( msg , dontAssert ){
- for ( var x in counts ){
- var e = counts[x];
- var z = db.foo.findOne( { _id : parseInt( x ) } );
-
- if ( z && z.x == e )
- continue;
-
- if ( dontAssert ){
- if ( z )
- delete z.s;
- print( "not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z) );
- return false;
- }
-
- s.s.getDB("admin").runCommand({ setParameter : 1, logLevel : 2 });
-
- printjson( db.foo.findOne( { _id : parseInt( x ) } ) );
+ var s = new ShardingTest(
+ {name: "slow_sharding_balance4", shards: 2, mongos: 1, other: {chunkSize: 1}});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
+ assert.eq(1, s.config.chunks.count(), "setup1");
+
+ s.config.settings.find().forEach(printjson);
+
+ db = s.getDB("test");
+
+ bigString = "";
+ while (bigString.length < 10000)
+ bigString += "asdasdasdasdadasdasdasdasdasdasdasdasda";
+
+ N = 3000;
+
+ num = 0;
+
+ counts = {};
+
+ //
+ // TODO: Rewrite to make much clearer.
+ //
+ // The core behavior of this test is to add a bunch of documents to a sharded collection, then
+ // incrementally update each document and make sure the counts in the document match our update
+ // counts while balancing occurs (doUpdate()). Every once in a while we also check (check())
+ // our counts via a query.
+ //
+ // If during a chunk migration an update is missed, we trigger an assertion and fail.
+ //
+
+ function doUpdate(bulk, includeString, optionalId) {
+ var up = {
+ $inc: {x: 1}
+ };
+ if (includeString)
+ up["$set"] = {
+ s: bigString
+ };
+ var myid = optionalId == undefined ? Random.randInt(N) : optionalId;
+ bulk.find({_id: myid}).upsert().update(up);
+
+ counts[myid] = (counts[myid] ? counts[myid] : 0) + 1;
+ return myid;
+ }
- var y = db.foo.findOne( { _id : parseInt( x ) } );
+ Random.setRandomSeed();
+    // Initially update all documents from 1 to N,
+    // otherwise later checks can fail because
+    // no document previously existed
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++) {
+ doUpdate(bulk, true, i);
+ }
- if ( y ){
- delete y.s;
- }
+ for (i = 0; i < N * 9; i++) {
+ doUpdate(bulk, false);
+ }
+ assert.writeOK(bulk.execute());
- s.printChunks( "test.foo" );
-
- assert( z , "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg );
- assert.eq( e , z.x , "count for : " + x + " y:" + tojson(y) + " " + msg );
+ for (var i = 0; i < 50; i++) {
+ s.printChunks("test.foo");
+ if (check("initial:" + i, true))
+ break;
+ sleep(5000);
}
+ check("initial at end");
+
+ assert.lt(20, s.config.chunks.count(), "setup2");
+
+ function check(msg, dontAssert) {
+ for (var x in counts) {
+ var e = counts[x];
+ var z = db.foo.findOne({_id: parseInt(x)});
+
+ if (z && z.x == e)
+ continue;
+
+ if (dontAssert) {
+ if (z)
+ delete z.s;
+ print("not asserting for key failure: " + x + " want: " + e + " got: " + tojson(z));
+ return false;
+ }
- return true;
-}
+ s.s.getDB("admin").runCommand({setParameter: 1, logLevel: 2});
-function diff1(){
-
- jsTest.log("Running diff1...");
+ printjson(db.foo.findOne({_id: parseInt(x)}));
- bulk = db.foo.initializeUnorderedBulkOp();
- var myid = doUpdate( bulk, false );
- var res = assert.writeOK(bulk.execute());
+ var y = db.foo.findOne({_id: parseInt(x)});
- assert.eq( 1, res.nModified,
- "diff myid: " + myid + " 2: " + res.toString() + "\n" +
- " correct count is: " + counts[myid] +
- " db says count is: " + tojson(db.foo.findOne({ _id: myid })) );
+ if (y) {
+ delete y.s;
+ }
- var x = s.chunkCounts( "foo" );
- if ( Math.random() > .999 )
- printjson( x );
- return Math.max( x.shard0000 , x.shard0001 ) - Math.min( x.shard0000 , x.shard0001 );
-}
+ s.printChunks("test.foo");
+
+ assert(z, "couldn't find : " + x + " y:" + tojson(y) + " e: " + e + " " + msg);
+ assert.eq(e, z.x, "count for : " + x + " y:" + tojson(y) + " " + msg);
+ }
+
+ return true;
+ }
+
+ function diff1() {
+ jsTest.log("Running diff1...");
+
+ bulk = db.foo.initializeUnorderedBulkOp();
+ var myid = doUpdate(bulk, false);
+ var res = assert.writeOK(bulk.execute());
+
+ assert.eq(1,
+ res.nModified,
+ "diff myid: " + myid + " 2: " + res.toString() + "\n" + " correct count is: " +
+ counts[myid] + " db says count is: " + tojson(db.foo.findOne({_id: myid})));
+
+ var x = s.chunkCounts("foo");
+ if (Math.random() > .999)
+ printjson(x);
+ return Math.max(x.shard0000, x.shard0001) - Math.min(x.shard0000, x.shard0001);
+ }
-assert.lt( 20 , diff1() ,"initial load" );
-print( diff1() );
+ assert.lt(20, diff1(), "initial load");
+ print(diff1());
-s.startBalancer();
+ s.startBalancer();
-assert.soon( function(){
- var d = diff1();
- return d < 5;
-} , "balance didn't happen" , 1000 * 60 * 20 , 1 );
+ assert.soon(function() {
+ var d = diff1();
+ return d < 5;
+ }, "balance didn't happen", 1000 * 60 * 20, 1);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_migrate_cursor1.js b/jstests/sharding/sharding_migrate_cursor1.js
index 0edeb2a0ac9..584181cdce2 100644
--- a/jstests/sharding/sharding_migrate_cursor1.js
+++ b/jstests/sharding/sharding_migrate_cursor1.js
@@ -1,81 +1,85 @@
// SERVER-2068
(function() {
-var chunkSize = 25;
+ var chunkSize = 25;
-var s = new ShardingTest({ name: "migrate_cursor1",
- shards: 2,
- mongos: 1,
- other: { chunkSize : chunkSize } });
+ var s = new ShardingTest(
+ {name: "migrate_cursor1", shards: 2, mongos: 1, other: {chunkSize: chunkSize}});
-s.adminCommand( { enablesharding : "test" } );
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-t = db.foo;
+ s.adminCommand({enablesharding: "test"});
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
+ t = db.foo;
-bigString = "";
-stringSize = 1024;
+ bigString = "";
+ stringSize = 1024;
-while ( bigString.length < stringSize )
- bigString += "asdasdas";
+ while (bigString.length < stringSize)
+ bigString += "asdasdas";
-stringSize = bigString.length;
-docsPerChunk = Math.ceil( ( chunkSize * 1024 * 1024 ) / ( stringSize - 12 ) );
-numChunks = 5;
-numDocs = 20 * docsPerChunk;
+ stringSize = bigString.length;
+ docsPerChunk = Math.ceil((chunkSize * 1024 * 1024) / (stringSize - 12));
+ numChunks = 5;
+ numDocs = 20 * docsPerChunk;
-print( "stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs );
+ print("stringSize: " + stringSize + " docsPerChunk: " + docsPerChunk + " numDocs: " + numDocs);
-var bulk = t.initializeUnorderedBulkOp();
-for (var i = 0; i < numDocs; i++){
- bulk.insert({ _id: i, s: bigString });
-}
-assert.writeOK(bulk.execute());
+ var bulk = t.initializeUnorderedBulkOp();
+ for (var i = 0; i < numDocs; i++) {
+ bulk.insert({_id: i, s: bigString});
+ }
+ assert.writeOK(bulk.execute());
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.lt( numChunks , s.config.chunks.find().count() , "initial 1" );
+ assert.lt(numChunks, s.config.chunks.find().count(), "initial 1");
-primary = s.getPrimaryShard( "test" ).getDB( "test" ).foo;
-secondaryName = s.getOther( primary.name );
-secondary = secondaryName.getDB( "test" ).foo;
+ primary = s.getPrimaryShard("test").getDB("test").foo;
+ secondaryName = s.getOther(primary.name);
+ secondary = secondaryName.getDB("test").foo;
-assert.eq( numDocs , primary.count() , "initial 2" );
-assert.eq( 0 , secondary.count() , "initial 3" );
-assert.eq( numDocs , t.count() , "initial 4" );
+ assert.eq(numDocs, primary.count(), "initial 2");
+ assert.eq(0, secondary.count(), "initial 3");
+ assert.eq(numDocs, t.count(), "initial 4");
-x = primary.find( { _id : { $lt : 500 } } ).batchSize(2);
-x.next(); // 1. Create an open cursor
+ x = primary.find({_id: {$lt: 500}}).batchSize(2);
+ x.next(); // 1. Create an open cursor
-print("start moving chunks...");
+ print("start moving chunks...");
-// 2. Move chunk from s0 to s1 without waiting for deletion.
-// Command returns, but the deletion on s0 will block due to the open cursor.
-s.adminCommand( { moveChunk : "test.foo" , find : { _id : 0 } , to : secondaryName.name } );
+ // 2. Move chunk from s0 to s1 without waiting for deletion.
+ // Command returns, but the deletion on s0 will block due to the open cursor.
+ s.adminCommand({moveChunk: "test.foo", find: {_id: 0}, to: secondaryName.name});
-// 3. Start second moveChunk command from s0 to s1.
-// This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause deletion on s1.
-// This moveChunk will wait for deletion.
-join = startParallelShell( "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " + docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )" );
-assert.soon( function(){ return db.x.count() > 0; } , "XXX" , 30000 , 1 );
+ // 3. Start second moveChunk command from s0 to s1.
+ // This moveChunk should not observe the above deletion as a 'mod', transfer it to s1 and cause
+ // deletion on s1.
+ // This moveChunk will wait for deletion.
+ join = startParallelShell(
+ "db.x.insert( {x:1} ); db.adminCommand( { moveChunk : 'test.foo' , find : { _id : " +
+ docsPerChunk * 3 + " } , to : '" + secondaryName.name + "', _waitForDelete: true } )");
+ assert.soon(function() {
+ return db.x.count() > 0;
+ }, "XXX", 30000, 1);
-// 4. Close the cursor to enable chunk deletion.
-print( "itcount: " + x.itcount() );
+ // 4. Close the cursor to enable chunk deletion.
+ print("itcount: " + x.itcount());
-x = null;
-for ( i=0; i<5; i++ ) gc();
+ x = null;
+ for (i = 0; i < 5; i++)
+ gc();
-print( "cursor should be gone" );
+ print("cursor should be gone");
-// 5. Waiting for the second moveChunk to finish its deletion.
-// Note the deletion for the first moveChunk may not be finished.
-join();
+ // 5. Waiting for the second moveChunk to finish its deletion.
+ // Note the deletion for the first moveChunk may not be finished.
+ join();
-//assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
-// 6. Check the total number of docs on both shards to make sure no doc is lost.
-// Use itcount() to ignore orphan docments.
-assert.eq( numDocs , t.find().itcount() , "at end 2" );
+ // assert.soon( function(){ return numDocs == t.count(); } , "at end 1" )
+ // 6. Check the total number of docs on both shards to make sure no doc is lost.
+    // Use itcount() to ignore orphan documents.
+ assert.eq(numDocs, t.find().itcount(), "at end 2");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_multiple_ns_rs.js b/jstests/sharding/sharding_multiple_ns_rs.js
index f83d744527e..f3465e3b10d 100644
--- a/jstests/sharding/sharding_multiple_ns_rs.js
+++ b/jstests/sharding/sharding_multiple_ns_rs.js
@@ -1,57 +1,55 @@
(function() {
-var s = new ShardingTest({ name: "Sharding multiple ns",
- shards: 1,
- mongos: 1,
- other: { rs : true , chunkSize: 1 } });
+ var s = new ShardingTest(
+ {name: "Sharding multiple ns", shards: 1, mongos: 1, other: {rs: true, chunkSize: 1}});
-s.adminCommand( { enablesharding : "test" } );
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-var bulk = db.foo.initializeUnorderedBulkOp();
-var bulk2 = db.bar.initializeUnorderedBulkOp();
-for ( i=0; i<100; i++ ) {
- bulk.insert({ _id: i, x: i });
- bulk2.insert({ _id: i, x: i });
-}
-assert.writeOK(bulk.execute());
-assert.writeOK(bulk2.execute());
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ var bulk2 = db.bar.initializeUnorderedBulkOp();
+ for (i = 0; i < 100; i++) {
+ bulk.insert({_id: i, x: i});
+ bulk2.insert({_id: i, x: i});
+ }
+ assert.writeOK(bulk.execute());
+ assert.writeOK(bulk2.execute());
-sh.splitAt( "test.foo" , { _id : 50 } );
+ sh.splitAt("test.foo", {_id: 50});
-other = new Mongo( s.s.name );
-dbother = other.getDB( "test" );
+ other = new Mongo(s.s.name);
+ dbother = other.getDB("test");
-assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
-assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+ assert.eq(5, db.foo.findOne({_id: 5}).x);
+ assert.eq(5, dbother.foo.findOne({_id: 5}).x);
-assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
-assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
+ assert.eq(5, db.bar.findOne({_id: 5}).x);
+ assert.eq(5, dbother.bar.findOne({_id: 5}).x);
-s._rs[0].test.awaitReplication();
-s._rs[0].test.stopMaster(15);
+ s._rs[0].test.awaitReplication();
+ s._rs[0].test.stopMaster(15);
-// Wait for the primary to come back online...
-var primary = s._rs[0].test.getPrimary();
+ // Wait for the primary to come back online...
+ var primary = s._rs[0].test.getPrimary();
-// Wait for the mongos to recognize the new primary...
-ReplSetTest.awaitRSClientHosts( db.getMongo(), primary, { ismaster : true } );
+ // Wait for the mongos to recognize the new primary...
+ ReplSetTest.awaitRSClientHosts(db.getMongo(), primary, {ismaster: true});
-assert.eq( 5 , db.foo.findOne( { _id : 5 } ).x );
-assert.eq( 5 , db.bar.findOne( { _id : 5 } ).x );
+ assert.eq(5, db.foo.findOne({_id: 5}).x);
+ assert.eq(5, db.bar.findOne({_id: 5}).x);
-s.adminCommand( { shardcollection : "test.bar" , key : { _id : 1 } } );
-sh.splitAt( "test.bar" , { _id : 50 } );
+ s.adminCommand({shardcollection: "test.bar", key: {_id: 1}});
+ sh.splitAt("test.bar", {_id: 50});
-yetagain = new Mongo( s.s.name );
-assert.eq( 5 , yetagain.getDB( "test" ).bar.findOne( { _id : 5 } ).x );
-assert.eq( 5 , yetagain.getDB( "test" ).foo.findOne( { _id : 5 } ).x );
+ yetagain = new Mongo(s.s.name);
+ assert.eq(5, yetagain.getDB("test").bar.findOne({_id: 5}).x);
+ assert.eq(5, yetagain.getDB("test").foo.findOne({_id: 5}).x);
-assert.eq( 5 , dbother.bar.findOne( { _id : 5 } ).x );
-assert.eq( 5 , dbother.foo.findOne( { _id : 5 } ).x );
+ assert.eq(5, dbother.bar.findOne({_id: 5}).x);
+ assert.eq(5, dbother.foo.findOne({_id: 5}).x);
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js
index 7fe34a0c140..0841967b18e 100644
--- a/jstests/sharding/sharding_options.js
+++ b/jstests/sharding/sharding_options.js
@@ -2,142 +2,99 @@ var baseName = "jstests_sharding_sharding_options";
load('jstests/libs/command_line/test_parsed_options.js');
-
-
// Move Paranoia
jsTest.log("Testing \"moveParanoia\" command line option");
var expectedResult = {
- "parsed" : {
- "sharding" : {
- "archiveMovedChunks" : true
- }
- }
+ "parsed": {"sharding": {"archiveMovedChunks": true}}
};
-testGetCmdLineOptsMongod({ moveParanoia : "" }, expectedResult);
+testGetCmdLineOptsMongod({moveParanoia: ""}, expectedResult);
jsTest.log("Testing \"noMoveParanoia\" command line option");
expectedResult = {
- "parsed" : {
- "sharding" : {
- "archiveMovedChunks" : false
- }
- }
+ "parsed": {"sharding": {"archiveMovedChunks": false}}
};
-testGetCmdLineOptsMongod({ noMoveParanoia : "" }, expectedResult);
+testGetCmdLineOptsMongod({noMoveParanoia: ""}, expectedResult);
jsTest.log("Testing \"sharding.archiveMovedChunks\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_paranoia.json",
- "sharding" : {
- "archiveMovedChunks" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_paranoia.json",
+ "sharding": {"archiveMovedChunks": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/enable_paranoia.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_paranoia.json"},
expectedResult);
-
-
// Sharding Role
jsTest.log("Testing \"configsvr\" command line option");
var expectedResult = {
- "parsed" : {
- "sharding" : {
- "clusterRole" : "configsvr"
- },
- "storage" : {
- "journal" : {
- "enabled" : true
- }
- }
- }
+ "parsed":
+ {"sharding": {"clusterRole": "configsvr"}, "storage": {"journal": {"enabled": true}}}
};
-testGetCmdLineOptsMongod({ configsvr : "", journal: "" }, expectedResult);
+testGetCmdLineOptsMongod({configsvr: "", journal: ""}, expectedResult);
jsTest.log("Testing \"shardsvr\" command line option");
expectedResult = {
- "parsed" : {
- "sharding" : {
- "clusterRole" : "shardsvr"
- }
- }
+ "parsed": {"sharding": {"clusterRole": "shardsvr"}}
};
-testGetCmdLineOptsMongod({ shardsvr : "" }, expectedResult);
+testGetCmdLineOptsMongod({shardsvr: ""}, expectedResult);
jsTest.log("Testing \"sharding.clusterRole\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/set_shardingrole.json",
- "sharding" : {
- "clusterRole" : "configsvr"
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/set_shardingrole.json",
+ "sharding": {"clusterRole": "configsvr"}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/set_shardingrole.json" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_shardingrole.json"},
expectedResult);
-
-
// Auto Splitting
jsTest.log("Testing \"noAutoSplit\" command line option");
var expectedResult = {
- "parsed" : {
- "sharding" : {
- "autoSplit" : false
- }
- }
+ "parsed": {"sharding": {"autoSplit": false}}
};
-testGetCmdLineOptsMongos({ noAutoSplit : "" }, expectedResult);
+testGetCmdLineOptsMongos({noAutoSplit: ""}, expectedResult);
jsTest.log("Testing \"sharding.autoSplit\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/enable_autosplit.json",
- "sharding" : {
- "autoSplit" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/enable_autosplit.json",
+ "sharding": {"autoSplit": true}
}
};
-testGetCmdLineOptsMongos({ config : "jstests/libs/config_files/enable_autosplit.json" },
+testGetCmdLineOptsMongos({config: "jstests/libs/config_files/enable_autosplit.json"},
expectedResult);
// Test that we preserve switches explicitly set to false in config files. See SERVER-13439.
jsTest.log("Testing explicitly disabled \"moveParanoia\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_moveparanoia.ini",
- "sharding" : {
- "archiveMovedChunks" : false
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_moveparanoia.ini",
+ "sharding": {"archiveMovedChunks": false}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_moveparanoia.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_moveparanoia.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"noMoveParanoia\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_nomoveparanoia.ini",
- "sharding" : {
- "archiveMovedChunks" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_nomoveparanoia.ini",
+ "sharding": {"archiveMovedChunks": true}
}
};
-testGetCmdLineOptsMongod({ config : "jstests/libs/config_files/disable_nomoveparanoia.ini" },
+testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nomoveparanoia.ini"},
expectedResult);
jsTest.log("Testing explicitly disabled \"noAutoSplit\" config file option");
expectedResult = {
- "parsed" : {
- "config" : "jstests/libs/config_files/disable_noautosplit.ini",
- "sharding" : {
- "autoSplit" : true
- }
+ "parsed": {
+ "config": "jstests/libs/config_files/disable_noautosplit.ini",
+ "sharding": {"autoSplit": true}
}
};
-testGetCmdLineOptsMongos({ config : "jstests/libs/config_files/disable_noautosplit.ini" },
+testGetCmdLineOptsMongos({config: "jstests/libs/config_files/disable_noautosplit.ini"},
expectedResult);
-
print(baseName + " succeeded.");
diff --git a/jstests/sharding/sharding_rs1.js b/jstests/sharding/sharding_rs1.js
index 4da40b344d1..3ab25906838 100644
--- a/jstests/sharding/sharding_rs1.js
+++ b/jstests/sharding/sharding_rs1.js
@@ -1,60 +1,59 @@
// tests sharding with replica sets
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({ shards: 3,
- other: { rs: true, chunkSize: 1, enableBalancer: true }});
+ var s = new ShardingTest({shards: 3, other: {rs: true, chunkSize: 1, enableBalancer: true}});
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'test-rs0');
-s.config.settings.update( { _id: "balancer" }, { $set : { _waitForDelete : true } } , true );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+ s.config.settings.update({_id: "balancer"}, {$set: {_waitForDelete: true}}, true);
-var db = s.getDB("test");
+ var db = s.getDB("test");
-var bigString = "X".repeat(256 * 1024);
+ var bigString = "X".repeat(256 * 1024);
-var insertedBytes = 0;
-var num = 0;
+ var insertedBytes = 0;
+ var num = 0;
-// Insert 10 MB of data to result in 10+ chunks
-var bulk = db.foo.initializeUnorderedBulkOp();
-while (insertedBytes < (10 * 1024 * 1024)) {
- bulk.insert({ _id: num++, s: bigString, x: Math.random() });
- insertedBytes += bigString.length;
-}
-assert.writeOK(bulk.execute({w: 3}));
+ // Insert 10 MB of data to result in 10+ chunks
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ while (insertedBytes < (10 * 1024 * 1024)) {
+ bulk.insert({_id: num++, s: bigString, x: Math.random()});
+ insertedBytes += bigString.length;
+ }
+ assert.writeOK(bulk.execute({w: 3}));
-assert.commandWorked(s.s.adminCommand({ shardcollection: "test.foo" , key: { _id: 1 } }));
+ assert.commandWorked(s.s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}));
-jsTest.log("Waiting for balance to complete");
-s.awaitBalance('foo', 'test', 3 * 60 * 1000);
+ jsTest.log("Waiting for balance to complete");
+ s.awaitBalance('foo', 'test', 3 * 60 * 1000);
-jsTest.log("Stopping balancer");
-s.stopBalancer();
+ jsTest.log("Stopping balancer");
+ s.stopBalancer();
-jsTest.log("Balancer stopped, checking dbhashes");
-s._rs.forEach(function(rsNode) {
- rsNode.test.awaitReplication();
+ jsTest.log("Balancer stopped, checking dbhashes");
+ s._rs.forEach(function(rsNode) {
+ rsNode.test.awaitReplication();
- var dbHashes = rsNode.test.getHashes("test");
- print(rsNode.url + ': ' + tojson(dbHashes));
+ var dbHashes = rsNode.test.getHashes("test");
+ print(rsNode.url + ': ' + tojson(dbHashes));
- for (var j = 0; j < dbHashes.slaves.length; j++) {
- assert.eq(dbHashes.master.md5,
- dbHashes.slaves[j].md5,
- "hashes not same for: " + rsNode.url + " slave: " + j);
- }
-});
+ for (var j = 0; j < dbHashes.slaves.length; j++) {
+ assert.eq(dbHashes.master.md5,
+ dbHashes.slaves[j].md5,
+ "hashes not same for: " + rsNode.url + " slave: " + j);
+ }
+ });
-assert.eq( num , db.foo.find().count() , "C1" );
-assert.eq( num , db.foo.find().itcount() , "C2" );
-assert.eq( num , db.foo.find().sort( { _id : 1 } ).itcount() , "C3" );
-assert.eq( num , db.foo.find().sort( { _id : -1 } ).itcount() , "C4" );
+ assert.eq(num, db.foo.find().count(), "C1");
+ assert.eq(num, db.foo.find().itcount(), "C2");
+ assert.eq(num, db.foo.find().sort({_id: 1}).itcount(), "C3");
+ assert.eq(num, db.foo.find().sort({_id: -1}).itcount(), "C4");
-db.foo.ensureIndex( { x : 1 } );
-assert.eq( num , db.foo.find().sort( { x : 1 } ).itcount() , "C5" );
-assert.eq( num , db.foo.find().sort( { x : -1 } ).itcount() , "C6" );
+ db.foo.ensureIndex({x: 1});
+ assert.eq(num, db.foo.find().sort({x: 1}).itcount(), "C5");
+ assert.eq(num, db.foo.find().sort({x: -1}).itcount(), "C6");
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js
index affe175eaa4..7c323ac5d44 100644
--- a/jstests/sharding/sharding_rs2.js
+++ b/jstests/sharding/sharding_rs2.js
@@ -10,247 +10,244 @@
//
(function() {
-'use strict';
-
-// The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
-// from stepping down during migrations on slow evergreen builders.
-var s = new ShardingTest({ shards: 2,
- other: {
- chunkSize: 1,
- rs0: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- },
- rs1: {
- nodes: [
- {rsConfig: {votes: 1}},
- {rsConfig: {priority: 0, votes: 0}},
- ],
- }
- } });
-
-var db = s.getDB("test");
-var t = db.foo;
-
-s.adminCommand({ enablesharding: "test" });
-s.ensurePrimaryShard('test', 'test-rs0');
-
-// -------------------------------------------------------------------------------------------
-// ---------- test that config server updates when replica set config changes ----------------
-// -------------------------------------------------------------------------------------------
-
-
-db.foo.save({ _id: 5,x: 17 });
-assert.eq(1, db.foo.count());
-
-s.config.databases.find().forEach(printjson);
-s.config.shards.find().forEach(printjson);
-
-var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
-
-function countNodes(){
- var x = s.config.shards.findOne({ _id: dbPrimaryShardId });
- return x.host.split(",").length;
-}
-
-assert.eq(2, countNodes(), "A1");
-
-var rs = s.getRSEntry(dbPrimaryShardId);
-rs.test.add();
-try {
- rs.test.reInitiate();
-}
-catch (e){
- // this os ok as rs's may close connections on a change of master
- print(e);
-}
-
-assert.soon(
- function(){
+ 'use strict';
+
+ // The mongod secondaries are set to priority 0 and votes 0 to prevent the primaries
+ // from stepping down during migrations on slow evergreen builders.
+ var s = new ShardingTest({
+ shards: 2,
+ other: {
+ chunkSize: 1,
+ rs0: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ },
+ rs1: {
+ nodes: [{rsConfig: {votes: 1}}, {rsConfig: {priority: 0, votes: 0}}, ],
+ }
+ }
+ });
+
+ var db = s.getDB("test");
+ var t = db.foo;
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'test-rs0');
+
+ // -------------------------------------------------------------------------------------------
+ // ---------- test that config server updates when replica set config changes ----------------
+ // -------------------------------------------------------------------------------------------
+
+ db.foo.save({_id: 5, x: 17});
+ assert.eq(1, db.foo.count());
+
+ s.config.databases.find().forEach(printjson);
+ s.config.shards.find().forEach(printjson);
+
+ var dbPrimaryShardId = s.getPrimaryShardIdForDatabase("test");
+
+ function countNodes() {
+ var x = s.config.shards.findOne({_id: dbPrimaryShardId});
+ return x.host.split(",").length;
+ }
+
+ assert.eq(2, countNodes(), "A1");
+
+ var rs = s.getRSEntry(dbPrimaryShardId);
+ rs.test.add();
+ try {
+ rs.test.reInitiate();
+ } catch (e) {
+        // this is ok as rs's may close connections on a change of master
+ print(e);
+ }
+
+ assert.soon(function() {
try {
printjson(rs.test.getPrimary().getDB("admin").runCommand("isMaster"));
s.config.shards.find().forEach(printjsononeline);
return countNodes() == 3;
- }
- catch (e){
+ } catch (e) {
print(e);
}
}, "waiting for config server to update", 180 * 1000, 1000);
-// cleanup after adding node
-for (var i = 0; i < 5; i++) {
- try {
- db.foo.findOne();
- }
- catch (e) {
-
+ // cleanup after adding node
+ for (var i = 0; i < 5; i++) {
+ try {
+ db.foo.findOne();
+ } catch (e) {
+ }
}
-}
-
-jsTest.log("Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
-rs.test.awaitReplication();
-// Make sure we wait for secondaries here - otherwise a secondary could come online later and be used for the
-// count command before being fully replicated
-jsTest.log("Awaiting secondary status of all nodes");
-rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
-
-// -------------------------------------------------------------------------------------------
-// ---------- test routing to slaves ----------------
-// -------------------------------------------------------------------------------------------
-
-// --- not sharded ----
-var m = new Mongo(s.s.name);
-var ts = m.getDB("test").foo;
+ jsTest.log(
+ "Awaiting replication of all nodes, so spurious sync'ing queries don't upset our counts...");
+ rs.test.awaitReplication();
+    // Make sure we wait for secondaries here - otherwise a secondary
+    // could come online later and be used for the count command
+    // before being fully replicated
+ jsTest.log("Awaiting secondary status of all nodes");
+ rs.test.waitForState(rs.test.getSecondaries(), ReplSetTest.State.SECONDARY, 180 * 1000);
-var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ // -------------------------------------------------------------------------------------------
+ // ---------- test routing to slaves ----------------
+ // -------------------------------------------------------------------------------------------
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B1");
-}
+ // --- not sharded ----
-m.setSlaveOk();
+ var m = new Mongo(s.s.name);
+ var ts = m.getDB("test").foo;
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne().x, "B2");
-}
+ var before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-
-printjson(before);
-printjson(after);
-
-assert.lte(before.query + 10, after.query, "B3");
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B1");
+ }
-// --- add more data ----
+ m.setSlaveOk();
-db.foo.ensureIndex({ x: 1 });
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne().x, "B2");
+ }
-var bulk = db.foo.initializeUnorderedBulkOp();
-for (var i = 0; i < 100; i++) {
- if (i == 17) continue;
- bulk.insert({ x: i });
-}
-assert.writeOK(bulk.execute({ w: 3 }));
+ var after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-// Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
-// replication for this and future tests to pass
-rs.test.awaitReplication();
+ printjson(before);
+ printjson(after);
-assert.eq(100, ts.count(), "B4");
-assert.eq(100, ts.find().itcount(), "B5");
-assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
+ assert.lte(before.query + 10, after.query, "B3");
-t.find().batchSize(3).next();
-gc(); gc(); gc();
+ // --- add more data ----
-// --- sharded ----
+ db.foo.ensureIndex({x: 1});
-assert.eq(100, db.foo.count(), "C1");
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (var i = 0; i < 100; i++) {
+ if (i == 17)
+ continue;
+ bulk.insert({x: i});
+ }
+ assert.writeOK(bulk.execute({w: 3}));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { x: 1 } }));
+ // Counts pass the options of the connection - which is slaveOk'd, so we need to wait for
+ // replication for this and future tests to pass
+ rs.test.awaitReplication();
-// We're doing some manual chunk stuff, so stop the balancer first
-s.stopBalancer();
+ assert.eq(100, ts.count(), "B4");
+ assert.eq(100, ts.find().itcount(), "B5");
+ assert.eq(100, ts.find().batchSize(5).itcount(), "B6");
-assert.eq(100, t.count(), "C2");
-assert.commandWorked(s.s0.adminCommand({ split: "test.foo", middle: { x: 50 } }));
+ t.find().batchSize(3).next();
+ gc();
+ gc();
+ gc();
-s.printShardingStatus();
+ // --- sharded ----
-var other = s.config.shards.findOne({ _id: { $ne: dbPrimaryShardId } });
-assert.commandWorked(s.getDB('admin').runCommand({ moveChunk: "test.foo",
- find: { x: 10 },
- to: other._id,
- _secondaryThrottle: true,
- writeConcern: { w: 2 },
- _waitForDelete: true }));
-assert.eq(100, t.count(), "C3");
+ assert.eq(100, db.foo.count(), "C1");
-assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {x: 1}}));
-// by non-shard key
+ // We're doing some manual chunk stuff, so stop the balancer first
+ s.stopBalancer();
-m = new Mongo(s.s.name);
-ts = m.getDB("test").foo;
+ assert.eq(100, t.count(), "C2");
+ assert.commandWorked(s.s0.adminCommand({split: "test.foo", middle: {x: 50}}));
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ s.printShardingStatus();
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({ _id: 5 }).x, "D1");
-}
+ var other = s.config.shards.findOne({_id: {$ne: dbPrimaryShardId}});
+ assert.commandWorked(s.getDB('admin').runCommand({
+ moveChunk: "test.foo",
+ find: {x: 10},
+ to: other._id,
+ _secondaryThrottle: true,
+ writeConcern: {w: 2},
+ _waitForDelete: true
+ }));
+ assert.eq(100, t.count(), "C3");
-m.setSlaveOk();
-for (var i = 0; i < 10; i++) {
- assert.eq(17, ts.findOne({ _id: 5 }).x, "D2");
-}
+ assert.eq(50, rs.test.getPrimary().getDB("test").foo.count(), "C4");
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ // by non-shard key
-assert.lte(before.query + 10, after.query, "D3");
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
-// by shard key
+ before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-m = new Mongo(s.s.name);
-m.forceWriteMode("commands");
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D1");
+ }
-s.printShardingStatus();
+ m.setSlaveOk();
+ for (var i = 0; i < 10; i++) {
+ assert.eq(17, ts.findOne({_id: 5}).x, "D2");
+ }
-ts = m.getDB("test").foo;
+ after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ assert.lte(before.query + 10, after.query, "D3");
-for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({ x: 57 }).x, "E1");
-}
+ // by shard key
-m.setSlaveOk();
-for (var i = 0; i < 10; i++) {
- assert.eq(57, ts.findOne({ x: 57 }).x, "E2");
-}
+ m = new Mongo(s.s.name);
+ m.forceWriteMode("commands");
-after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+ s.printShardingStatus();
-assert.lte(before.query + 10, after.query, "E3");
+ ts = m.getDB("test").foo;
-assert.eq(100, ts.count(), "E4");
-assert.eq(100, ts.find().itcount(), "E5");
-printjson(ts.find().batchSize(5).explain());
+ before = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
-// fsyncLock the secondaries
-rs.test.getSecondaries().forEach(function(secondary) {
- assert.commandWorked(secondary.getDB("test").fsyncLock());
-});
-// Modify data only on the primary replica of the primary shard.
-// { x: 60 } goes to the shard of "rs", which is the primary shard.
-assert.writeOK(ts.insert({ primaryOnly: true, x: 60 }));
-// Read from secondary through mongos, the doc is not there due to replication delay or fsync.
-// But we can guarantee not to read from primary.
-assert.eq(0, ts.find({ primaryOnly: true, x: 60 }).itcount());
-// Unlock the secondaries
-rs.test.getSecondaries().forEach(function(secondary) {
- secondary.getDB("test").fsyncUnlock();
-});
-// Clean up the data
-assert.writeOK(ts.remove({ primaryOnly: true, x: 60 }, { writeConcern: { w: 3 }}));
+ for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E1");
+ }
-for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
m.setSlaveOk();
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
-}
+ for (var i = 0; i < 10; i++) {
+ assert.eq(57, ts.findOne({x: 57}).x, "E2");
+ }
-for (var i = 0; i < 10; i++) {
- m = new Mongo(s.s.name);
- ts = m.getDB("test").foo;
- assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
-}
+ after = rs.test.getPrimary().adminCommand("serverStatus").opcounters;
+
+ assert.lte(before.query + 10, after.query, "E3");
+
+ assert.eq(100, ts.count(), "E4");
+ assert.eq(100, ts.find().itcount(), "E5");
+ printjson(ts.find().batchSize(5).explain());
+
+ // fsyncLock the secondaries
+ rs.test.getSecondaries().forEach(function(secondary) {
+ assert.commandWorked(secondary.getDB("test").fsyncLock());
+ });
+ // Modify data only on the primary replica of the primary shard.
+ // { x: 60 } goes to the shard of "rs", which is the primary shard.
+ assert.writeOK(ts.insert({primaryOnly: true, x: 60}));
+ // Read from secondary through mongos, the doc is not there due to replication delay or fsync.
+ // But we can guarantee not to read from primary.
+ assert.eq(0, ts.find({primaryOnly: true, x: 60}).itcount());
+ // Unlock the secondaries
+ rs.test.getSecondaries().forEach(function(secondary) {
+ secondary.getDB("test").fsyncUnlock();
+ });
+ // Clean up the data
+ assert.writeOK(ts.remove({primaryOnly: true, x: 60}, {writeConcern: {w: 3}}));
+
+ for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ m.setSlaveOk();
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F2." + i);
+ }
+
+ for (var i = 0; i < 10; i++) {
+ m = new Mongo(s.s.name);
+ ts = m.getDB("test").foo;
+ assert.eq(100, ts.find().batchSize(5).itcount(), "F3." + i);
+ }
-printjson(db.adminCommand("getShardMap"));
+ printjson(db.adminCommand("getShardMap"));
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/sharding_state_after_stepdown.js b/jstests/sharding/sharding_state_after_stepdown.js
index 319f00cceaf..f5d9896a0d0 100644
--- a/jstests/sharding/sharding_state_after_stepdown.js
+++ b/jstests/sharding/sharding_state_after_stepdown.js
@@ -6,158 +6,181 @@
// @tags: [requires_persistence]
(function() {
-'use strict';
-
-var st = new ShardingTest({ shards: 2,
- mongos: 1,
- other: {
- rs: true,
- rsOptions: { nodes : 1 }
- }
- });
-
-var mongos = st.s0;
-var admin = mongos.getDB("admin");
-var shards = mongos.getCollection("config.shards").find().toArray();
-
-var coll = mongos.getCollection("foo.bar");
-var collSharded = mongos.getCollection("foo.barSharded");
-
-assert.commandWorked(admin.runCommand({ enableSharding : coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : collSharded.toString(),
- key : { _id : 1 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : collSharded.toString(),
- find : { _id : 0 },
- to : shards[1]._id }));
-
-assert.writeOK(coll.insert({ some : "data" }));
-assert.writeOK(collSharded.insert({ some : "data" }));
-assert.eq(2, mongos.adminCommand({ getShardVersion : collSharded.toString() }).version.t);
-
-st.printShardingStatus();
-
-// Restart both primaries to reset our sharding data
-var restartPrimaries = function() {
- var rs0Primary = st.rs0.getPrimary();
- var rs1Primary = st.rs1.getPrimary();
-
- st.rs0.stop(rs0Primary);
- st.rs1.stop(rs1Primary);
-
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ok : false });
-
- st.rs0.start(rs0Primary, { restart : true });
- st.rs1.start(rs1Primary, { restart : true });
-
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ismaster : true });
-};
-
-restartPrimaries();
-
-// Sharding data gets initialized either when shards are hit by an unsharded query or if some
-// metadata operation was run before the step down, which wrote a minOpTime recovery record (CSRS
-// only). In this case we did a moveChunk above from shard0 to shard1, so we will have this record
-// on shard0.
-if (st.configRS) {
- assert.neq("",
- st.rs0.getPrimary().adminCommand({ getShardVersion: coll.toString() }).configServer);
-}
-else {
- assert.eq("",
- st.rs0.getPrimary().adminCommand({ getShardVersion: coll.toString() }).configServer);
-}
-assert.eq("",
- st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-
-// Doing a find only accesses the primary (rs0), which is already recovered. Ensure that the
-// secondary still has no sharding knowledge.
-assert.neq(null, coll.findOne({}));
-assert.eq("",
- st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-
-//
-//
-// Sharding data initialized when shards are hit by a sharded query
-assert.neq(null, collSharded.findOne({}));
-assert.neq("",
- st.rs0.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-assert.neq("",
- st.rs1.getPrimary().adminCommand({ getShardVersion : coll.toString() }).configServer);
-
-
-// Stepdown both primaries to reset our sharding data
-var stepDownPrimaries = function() {
-
- var rs0Primary = st.rs0.getPrimary();
- var rs1Primary = st.rs1.getPrimary();
-
- try {
- rs0Primary.adminCommand({ replSetStepDown : 1000 * 1000, force : true });
- assert(false);
- }
- catch(ex) {
- // Expected connection exception, will check for stepdown later
- }
-
- try {
- rs1Primary.adminCommand({ replSetStepDown : 1000 * 1000, force : true });
- assert(false);
- }
- catch(ex) {
- // Expected connection exception, will check for stepdown later
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2, mongos: 1, other: {rs: true, rsOptions: {nodes: 1}}});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+
+ var coll = mongos.getCollection("foo.bar");
+ var collSharded = mongos.getCollection("foo.barSharded");
+
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(
+ admin.runCommand({shardCollection: collSharded.toString(), key: {_id: 1}}));
+ assert.commandWorked(
+ admin.runCommand({moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[1]._id}));
+
+ assert.writeOK(coll.insert({some: "data"}));
+ assert.writeOK(collSharded.insert({some: "data"}));
+ assert.eq(2, mongos.adminCommand({getShardVersion: collSharded.toString()}).version.t);
+
+ st.printShardingStatus();
+
+ // Restart both primaries to reset our sharding data
+ var restartPrimaries = function() {
+ var rs0Primary = st.rs0.getPrimary();
+ var rs1Primary = st.rs1.getPrimary();
+
+ st.rs0.stop(rs0Primary);
+ st.rs1.stop(rs1Primary);
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ ok:
+ false
+ });
+
+ st.rs0.start(rs0Primary, {restart: true});
+ st.rs1.start(rs1Primary, {restart: true});
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ ismaster:
+ true
+ });
+ };
+
+ restartPrimaries();
+
+    // Sharding data gets initialized either when shards are hit by
+    // an unsharded query or if some metadata operation was run
+    // before the step down, which wrote a minOpTime recovery
+    // record (CSRS only). In this case we did a moveChunk above
+    // from shard0 to shard1, so we will have this record
+    // on shard0.
+ if (st.configRS) {
+ assert.neq(
+ "", st.rs0.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
+ } else {
+ assert.eq(
+ "", st.rs0.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
}
+ assert.eq("",
+ st.rs1.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { secondary : true });
-
- assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({ replSetFreeze : 0 }));
- assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({ replSetFreeze : 0 }));
-
- rs0Primary = st.rs0.getPrimary();
- rs1Primary = st.rs1.getPrimary();
-
- // Flush connections to avoid transient issues with conn pooling
- assert.commandWorked(rs0Primary.adminCommand({ connPoolSync : true }));
- assert.commandWorked(rs1Primary.adminCommand({ connPoolSync : true }));
-
- ReplSetTest.awaitRSClientHosts(mongos, [rs0Primary, rs1Primary], { ismaster : true });
-};
-
-stepDownPrimaries();
-
-//
-//
-// No sharding metadata until shards are hit by a metadata operation
-assert.eq({},
- st.rs0.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-assert.eq({},
- st.rs1.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-
-//
-//
-// Metadata commands should enable sharding data implicitly
-assert.commandWorked(mongos.adminCommand({ split : collSharded.toString(), middle : { _id : 0 }}));
-assert.eq({},
- st.rs0.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-assert.neq({},
- st.rs1.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
+ // Doing a find only accesses the primary (rs0), which is already recovered. Ensure that the
+ // secondary still has no sharding knowledge.
+ assert.neq(null, coll.findOne({}));
+ assert.eq("",
+ st.rs1.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
-//
-//
-// MoveChunk command should enable sharding data implicitly on TO-shard
-assert.commandWorked(mongos.adminCommand({ moveChunk : collSharded.toString(), find : { _id : 0 },
- to : shards[0]._id }));
-assert.neq({},
- st.rs0.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-assert.neq({},
- st.rs1.getPrimary().adminCommand(
- { getShardVersion : collSharded.toString(), fullMetadata : true }).metadata);
-
-st.stop();
+ //
+ //
+ // Sharding data initialized when shards are hit by a sharded query
+ assert.neq(null, collSharded.findOne({}));
+ assert.neq("",
+ st.rs0.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
+ assert.neq("",
+ st.rs1.getPrimary().adminCommand({getShardVersion: coll.toString()}).configServer);
+
+ // Stepdown both primaries to reset our sharding data
+ var stepDownPrimaries = function() {
+
+ var rs0Primary = st.rs0.getPrimary();
+ var rs1Primary = st.rs1.getPrimary();
+
+ try {
+ rs0Primary.adminCommand({replSetStepDown: 1000 * 1000, force: true});
+ assert(false);
+ } catch (ex) {
+ // Expected connection exception, will check for stepdown later
+ }
+
+ try {
+ rs1Primary.adminCommand({replSetStepDown: 1000 * 1000, force: true});
+ assert(false);
+ } catch (ex) {
+ // Expected connection exception, will check for stepdown later
+ }
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ secondary:
+ true
+ });
+
+ assert.commandWorked(new Mongo(rs0Primary.host).adminCommand({replSetFreeze: 0}));
+ assert.commandWorked(new Mongo(rs1Primary.host).adminCommand({replSetFreeze: 0}));
+
+ rs0Primary = st.rs0.getPrimary();
+ rs1Primary = st.rs1.getPrimary();
+
+ // Flush connections to avoid transient issues with conn pooling
+ assert.commandWorked(rs0Primary.adminCommand({connPoolSync: true}));
+ assert.commandWorked(rs1Primary.adminCommand({connPoolSync: true}));
+
+ ReplSetTest.awaitRSClientHosts(mongos,
+ [rs0Primary, rs1Primary],
+ {
+ ismaster:
+ true
+ });
+ };
+
+ stepDownPrimaries();
+
+ //
+ //
+ // No sharding metadata until shards are hit by a metadata operation
+ assert.eq({},
+ st.rs0.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+ assert.eq({},
+ st.rs1.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+
+ //
+ //
+ // Metadata commands should enable sharding data implicitly
+ assert.commandWorked(mongos.adminCommand({split: collSharded.toString(), middle: {_id: 0}}));
+ assert.eq({},
+ st.rs0.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+ assert.neq({},
+ st.rs1.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+
+ //
+ //
+ // MoveChunk command should enable sharding data implicitly on TO-shard
+ assert.commandWorked(mongos.adminCommand(
+ {moveChunk: collSharded.toString(), find: {_id: 0}, to: shards[0]._id}));
+ assert.neq({},
+ st.rs0.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+ assert.neq({},
+ st.rs1.getPrimary().adminCommand({
+ getShardVersion: collSharded.toString(),
+ fullMetadata: true
+ }).metadata);
+
+ st.stop();
})();
diff --git a/jstests/sharding/sharding_system_namespaces.js b/jstests/sharding/sharding_system_namespaces.js
index 27ba183dd64..f721c07634d 100644
--- a/jstests/sharding/sharding_system_namespaces.js
+++ b/jstests/sharding/sharding_system_namespaces.js
@@ -11,7 +11,7 @@
// P.S. wiredtiger options are not valid for MMAPv1, but MMAPv1 will
// keep and ignore them.
-var st = new ShardingTest({ shards : 2 });
+var st = new ShardingTest({shards: 2});
var db = st.s.getDB("test");
var coll = db.sharding_system_namespaces;
@@ -24,48 +24,39 @@ var storageEngines = st.shard0.getDB("local").serverBuildInfo().storageEngines;
print("Supported storage engines: " + storageEngines);
if (Array.contains(storageEngines, "wiredTiger")) {
-
function checkCollectionOptions(database) {
- var collectionsInfos = database.getCollectionInfos();
- printjson(collectionsInfos);
- var info = collectionsInfos.filter(function(c) {
- return c.name == "sharding_system_namespaces";
- })[0];
- assert.eq(info.options.storageEngine.wiredTiger.configString, "block_compressor=zlib");
+ var collectionsInfos = database.getCollectionInfos();
+ printjson(collectionsInfos);
+ var info = collectionsInfos.filter(function(c) {
+ return c.name == "sharding_system_namespaces";
+ })[0];
+ assert.eq(info.options.storageEngine.wiredTiger.configString, "block_compressor=zlib");
}
db.createCollection("sharding_system_namespaces",
- {
- storageEngine: {
- wiredTiger: { configString: "block_compressor=zlib" }
- }
- });
+ {storageEngine: {wiredTiger: {configString: "block_compressor=zlib"}}});
checkCollectionOptions(db);
- assert.commandWorked(db.adminCommand({ enableSharding: 'test' }));
+ assert.commandWorked(db.adminCommand({enableSharding: 'test'}));
st.ensurePrimaryShard('test', 'shard0001');
- assert.commandWorked(db.adminCommand({ shardCollection: coll + '', key: { x: 1 }}));
+ assert.commandWorked(db.adminCommand({shardCollection: coll + '', key: {x: 1}}));
coll.insert({x: 0});
coll.insert({x: 10});
- assert.commandWorked(db.adminCommand({ split: coll + '', middle: { x: 5 }}));
+ assert.commandWorked(db.adminCommand({split: coll + '', middle: {x: 5}}));
st.printShardingStatus();
var primaryShard = st.getPrimaryShard("test");
- anotherShard = st.getOther( primaryShard );
- assert.commandWorked(db.adminCommand({
- movechunk: coll + '',
- find: { x: 5 },
- to: anotherShard.name
- }));
+ anotherShard = st.getOther(primaryShard);
+ assert.commandWorked(
+ db.adminCommand({movechunk: coll + '', find: {x: 5}, to: anotherShard.name}));
st.printShardingStatus();
checkCollectionOptions(anotherShard.getDB("test"));
-}
-else {
+} else {
print("Skipping test. wiredTiger engine not supported by mongod binary.");
}
diff --git a/jstests/sharding/sort1.js b/jstests/sharding/sort1.js
index 2ff8e1a4daf..57bae9dc390 100644
--- a/jstests/sharding/sort1.js
+++ b/jstests/sharding/sort1.js
@@ -1,106 +1,110 @@
(function() {
-var s = new ShardingTest({ name: "sort1",
- shards: 2,
- mongos: 2 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.data" , key : { 'sub.num' : 1 } } );
-
-db = s.getDB( "test" );
-
-N = 100;
-
-forward = [];
-backward = [];
-for ( i=0; i<N; i++ ){
- db.data.insert( { _id : i , sub: {num : i , x : N - i }} );
- forward.push( i );
- backward.push( ( N - 1 ) - i );
-}
-
-s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 33 } } );
-s.adminCommand( { split : "test.data" , middle : { 'sub.num' : 66 } } );
-
-s.adminCommand({ movechunk : "test.data",
- find : { 'sub.num' : 50 },
- to : s.getOther( s.getPrimaryShard( "test" ) ).name,
- waitForDelete : true });
-
-assert.lte( 3 , s.config.chunks.find().itcount() , "A1" );
-
-temp = s.config.chunks.find().sort( { min : 1 } ).toArray();
-temp.forEach( printjsononeline );
-
-z = 0;
-for ( ; z<temp.length; z++ )
- if ( temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50 )
- break;
-
-assert.eq( temp[z-1].shard , temp[z+1].shard , "A2" );
-assert.neq( temp[z-1].shard , temp[z].shard , "A3" );
-
-temp = db.data.find().sort( { 'sub.num' : 1 } ).toArray();
-assert.eq( N , temp.length , "B1" );
-for ( i=0; i<100; i++ ){
- assert.eq( i , temp[i].sub.num , "B2" );
-}
-
-
-db.data.find().sort( { 'sub.num' : 1 } ).toArray();
-s.getPrimaryShard("test").getDB( "test" ).data.find().sort( { 'sub.num' : 1 } ).toArray();
-
-a = Date.timeFunc( function(){ z = db.data.find().sort( { 'sub.num' : 1 } ).toArray(); } , 200 );
-assert.eq( 100 , z.length , "C1" );
-b = 1.5 * Date.timeFunc( function(){
- z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num' : 1}).toArray();
- }, 200 );
-assert.eq( 67 , z.length , "C2" );
-
-print( "a: " + a + " b:" + b + " mongos slow down: " + Math.ceil( 100 * ( ( a - b ) / b ) ) + "%" );
-
-// -- secondary index sorting
-
-function getSorted( by , dir , proj ){
- var s = {};
- s[by] = dir || 1;
- printjson( s );
- var cur = db.data.find( {} , proj || {} ).sort( s );
- return terse( cur.map( function(z){ return z.sub.num; } ) );
-}
-
-function terse( a ){
- var s = "";
- for ( var i=0; i<a.length; i++ ){
- if ( i > 0 )
- s += ",";
- s += a[i];
+ var s = new ShardingTest({name: "sort1", shards: 2, mongos: 2});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.data", key: {'sub.num': 1}});
+
+ db = s.getDB("test");
+
+ N = 100;
+
+ forward = [];
+ backward = [];
+ for (i = 0; i < N; i++) {
+ db.data.insert({_id: i, sub: {num: i, x: N - i}});
+ forward.push(i);
+ backward.push((N - 1) - i);
+ }
+
+ s.adminCommand({split: "test.data", middle: {'sub.num': 33}});
+ s.adminCommand({split: "test.data", middle: {'sub.num': 66}});
+
+ s.adminCommand({
+ movechunk: "test.data",
+ find: {'sub.num': 50},
+ to: s.getOther(s.getPrimaryShard("test")).name,
+ waitForDelete: true
+ });
+
+ assert.lte(3, s.config.chunks.find().itcount(), "A1");
+
+ temp = s.config.chunks.find().sort({min: 1}).toArray();
+ temp.forEach(printjsononeline);
+
+ z = 0;
+ for (; z < temp.length; z++)
+ if (temp[z].min["sub.num"] <= 50 && temp[z].max["sub.num"] > 50)
+ break;
+
+ assert.eq(temp[z - 1].shard, temp[z + 1].shard, "A2");
+ assert.neq(temp[z - 1].shard, temp[z].shard, "A3");
+
+ temp = db.data.find().sort({'sub.num': 1}).toArray();
+ assert.eq(N, temp.length, "B1");
+ for (i = 0; i < 100; i++) {
+ assert.eq(i, temp[i].sub.num, "B2");
+ }
+
+ db.data.find().sort({'sub.num': 1}).toArray();
+ s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+
+ a = Date.timeFunc(function() {
+ z = db.data.find().sort({'sub.num': 1}).toArray();
+ }, 200);
+ assert.eq(100, z.length, "C1");
+ b = 1.5 *
+ Date.timeFunc(function() {
+ z = s.getPrimaryShard("test").getDB("test").data.find().sort({'sub.num': 1}).toArray();
+ }, 200);
+ assert.eq(67, z.length, "C2");
+
+ print("a: " + a + " b:" + b + " mongos slow down: " + Math.ceil(100 * ((a - b) / b)) + "%");
+
+ // -- secondary index sorting
+
+ function getSorted(by, dir, proj) {
+ var s = {};
+ s[by] = dir || 1;
+ printjson(s);
+ var cur = db.data.find({}, proj || {}).sort(s);
+ return terse(cur.map(function(z) {
+ return z.sub.num;
+ }));
+ }
+
+ function terse(a) {
+ var s = "";
+ for (var i = 0; i < a.length; i++) {
+ if (i > 0)
+ s += ",";
+ s += a[i];
+ }
+ return s;
}
- return s;
-}
-forward = terse(forward);
-backward = terse(backward);
+ forward = terse(forward);
+ backward = terse(backward);
-assert.eq( forward , getSorted( "sub.num" , 1 ) , "D1" );
-assert.eq( backward , getSorted( "sub.num" , -1 ) , "D2" );
+ assert.eq(forward, getSorted("sub.num", 1), "D1");
+ assert.eq(backward, getSorted("sub.num", -1), "D2");
-assert.eq( backward , getSorted( "sub.x" , 1 ) , "D3" );
-assert.eq( forward , getSorted( "sub.x" , -1 ) , "D4" );
+ assert.eq(backward, getSorted("sub.x", 1), "D3");
+ assert.eq(forward, getSorted("sub.x", -1), "D4");
-assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub.num' : 1 } ) , "D5" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub.num' : 1 } ) , "D6" );
+ assert.eq(backward, getSorted("sub.x", 1, {'sub.num': 1}), "D5");
+ assert.eq(forward, getSorted("sub.x", -1, {'sub.num': 1}), "D6");
-assert.eq( backward , getSorted( "sub.x" , 1 , { 'sub' : 1 } ) , "D7" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { 'sub' : 1 } ) , "D8" );
+ assert.eq(backward, getSorted("sub.x", 1, {'sub': 1}), "D7");
+ assert.eq(forward, getSorted("sub.x", -1, {'sub': 1}), "D8");
-assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0 } ) , "D9" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0 } ) , "D10" );
+ assert.eq(backward, getSorted("sub.x", 1, {'_id': 0}), "D9");
+ assert.eq(forward, getSorted("sub.x", -1, {'_id': 0}), "D10");
-assert.eq( backward , getSorted( "sub.x" , 1 , { '_id' : 0, 'sub.num':1 } ) , "D11" );
-assert.eq( forward , getSorted( "sub.x" , -1 , { '_id' : 0, 'sub.num':1 } ) , "D12" );
+ assert.eq(backward, getSorted("sub.x", 1, {'_id': 0, 'sub.num': 1}), "D11");
+ assert.eq(forward, getSorted("sub.x", -1, {'_id': 0, 'sub.num': 1}), "D12");
-s.stop();
+ s.stop();
})();
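The secondary-index assertions in sort1.js hinge on each document storing sub.x = N - sub.num, so an ascending sort on sub.x returns shard-key values in descending order (the D3/D5 checks); likewise the direct read from the primary shard returns only the 67 documents left there after the middle chunk moved, which is why C2 expects 67. A minimal sketch of the first relationship, assuming the 100 documents inserted above:

// Ascending sub.x order is descending sub.num order, because sub.x = N - sub.num.
var bySubX = db.data.find({}, {'sub.num': 1}).sort({'sub.x': 1}).map(function(d) {
    return d.sub.num;
});
assert.eq(99, bySubX[0]);     // largest sub.num comes first
assert.eq(0, bySubX[99]);     // smallest sub.num comes last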
diff --git a/jstests/sharding/split_chunk.js b/jstests/sharding/split_chunk.js
index 0f3a33f324d..96368ff8023 100644
--- a/jstests/sharding/split_chunk.js
+++ b/jstests/sharding/split_chunk.js
@@ -5,11 +5,11 @@
* either the upper or lower bound of the entire shard key space.
*/
-var st = new ShardingTest({ shards: 1 });
+var st = new ShardingTest({shards: 1});
st.stopBalancer();
var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
+testDB.adminCommand({enableSharding: 'test'});
var callSplit = function(db, minKey, maxKey, splitPoints) {
var res = st.s.adminCommand({getShardVersion: "test.user"});
@@ -20,111 +20,115 @@ var callSplit = function(db, minKey, maxKey, splitPoints) {
from: 'shard0000',
min: minKey,
max: maxKey,
- keyPattern: { x: 1 },
+ keyPattern: {x: 1},
splitKeys: splitPoints,
shardVersion: shardVersion,
});
};
var tests = [
-//
-// Lower extreme chunk tests.
-//
-
-// All chunks have 1 doc.
-//
-// Expected doc counts for new chunks:
-// [ MinKey, -2 ): 1
-// [ -2, -1 ): 1
-// [ -1, 0): 1
-//
-function(db) {
- var res = callSplit(db, { x: MinKey }, { x: 0 }, [{ x: -2 }, { x: -1 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: MinKey }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: -2 }) == 0, tojson(res.shouldMigrate.max));
-},
-
-// One chunk has single doc, extreme doesn't.
-//
-// Expected doc counts for new chunks:
-// [ MinKey, -1 ): 2
-// [ -1, 0): 1
-//
-function(db) {
- var res = callSplit(db, { x: MinKey }, { x: 0 }, [{ x: -1 }]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
-},
-
-// Only extreme has single doc.
-//
-// Expected doc counts for new chunks:
-// [ MinKey, -2 ): 1
-// [ -2, 0): 2
-//
-function(db) {
- var res = callSplit(db, { x: MinKey }, { x: 0 }, [{ x: -2 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: MinKey }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: -2 }) == 0, tojson(res.shouldMigrate.max));
-},
-
-//
-// Upper extreme chunk tests.
-//
-
-// All chunks have 1 doc.
-//
-// Expected doc counts for new chunks:
-// [ 0, 1 ): 1
-// [ 1, 2 ): 1
-// [ 2, MaxKey): 1
-//
-function(db) {
- var res = callSplit(db, { x: 0 }, { x: MaxKey }, [{ x: 1 }, { x: 2 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: 2 }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: MaxKey }) == 0, tojson(res.shouldMigrate.max));
-},
-
-// One chunk has single doc, extreme doesn't.
-//
-// Expected doc counts for new chunks:
-// [ 0, 1 ): 1
-// [ 1, MaxKey): 2
-//
-function(db) {
- var res = callSplit(db, { x: 0 }, { x: MaxKey }, [{ x: 1 }]);
- assert.commandWorked(res);
- assert.eq(res.shouldMigrate, null, tojson(res));
-},
-
-// Only extreme has single doc.
-//
-// Expected doc counts for new chunks:
-// [ 0, 2 ): 2
-// [ 2, MaxKey): 1
-//
-function(db) {
- var res = callSplit(db, { x: 0 }, { x: MaxKey }, [{ x: 2 }]);
- assert.commandWorked(res);
- assert.neq(res.shouldMigrate, null, tojson(res));
- assert(bsonWoCompare(res.shouldMigrate.min, { x: 2 }) == 0, tojson(res.shouldMigrate.min));
- assert(bsonWoCompare(res.shouldMigrate.max, { x: MaxKey }) == 0, tojson(res.shouldMigrate.max));
-},
+ //
+ // Lower extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, -1 ): 1
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}, {x: -1}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -1 ): 2
+ // [ -1, 0): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ MinKey, -2 ): 1
+ // [ -2, 0): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: MinKey}, {x: 0}, [{x: -2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: MinKey}) == 0,
+ tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: -2}) == 0, tojson(res.shouldMigrate.max));
+ },
+
+ //
+ // Upper extreme chunk tests.
+ //
+
+ // All chunks have 1 doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, 2 ): 1
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}, {x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
+
+ // One chunk has single doc, extreme doesn't.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 1 ): 1
+ // [ 1, MaxKey): 2
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 1}]);
+ assert.commandWorked(res);
+ assert.eq(res.shouldMigrate, null, tojson(res));
+ },
+
+ // Only extreme has single doc.
+ //
+ // Expected doc counts for new chunks:
+ // [ 0, 2 ): 2
+ // [ 2, MaxKey): 1
+ //
+ function(db) {
+ var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);
+ assert.commandWorked(res);
+ assert.neq(res.shouldMigrate, null, tojson(res));
+ assert(bsonWoCompare(res.shouldMigrate.min, {x: 2}) == 0, tojson(res.shouldMigrate.min));
+ assert(bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}) == 0,
+ tojson(res.shouldMigrate.max));
+ },
];
tests.forEach(function(test) {
// setup
- testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
- testDB.adminCommand({ split: 'test.user', middle: { x: 0 }});
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB.adminCommand({split: 'test.user', middle: {x: 0}});
for (var x = -3; x < 3; x++) {
- testDB.user.insert({ x: x });
+ testDB.user.insert({x: x});
}
// run test
@@ -135,4 +139,3 @@ tests.forEach(function(test) {
});
st.stop();
-
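The interesting output in split_chunk.js is shouldMigrate: when a split leaves an extreme (MinKey- or MaxKey-bounded) chunk holding a single document, the shard suggests migrating that chunk and returns its bounds; otherwise the field is null. A sketch of reading the hint, restating one case from the hunk above (callSplit and the six documents at x = -3..2 come from the test setup):

var res = callSplit(db, {x: 0}, {x: MaxKey}, [{x: 2}]);   // leaves [2, MaxKey) with one doc
assert.commandWorked(res);
assert.neq(null, res.shouldMigrate, tojson(res));
// The suggested migration range is exactly the single-document top chunk.
assert.eq(0, bsonWoCompare(res.shouldMigrate.min, {x: 2}));
assert.eq(0, bsonWoCompare(res.shouldMigrate.max, {x: MaxKey}));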
diff --git a/jstests/sharding/split_large_key.js b/jstests/sharding/split_large_key.js
index a0cdcd61d67..5a8fe060c67 100644
--- a/jstests/sharding/split_large_key.js
+++ b/jstests/sharding/split_large_key.js
@@ -1,68 +1,71 @@
// Tests that splitting a chunk at a very large shard key value is not allowed
// and does not corrupt the config.chunks metadata.
(function() {
-'use strict';
+ 'use strict';
-function verifyChunk(keys, expectFail) {
- // If split failed then there's only 1 chunk
- // With a min & max for the shardKey
- if (expectFail) {
- assert.eq(1, configDB.chunks.find().count(), "Chunks count no split");
- var chunkDoc = configDB.chunks.findOne();
- assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
- assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
- } else {
- assert.eq(2, configDB.chunks.find().count(), "Chunks count split");
+ function verifyChunk(keys, expectFail) {
+ // If split failed then there's only 1 chunk
+ // With a min & max for the shardKey
+ if (expectFail) {
+ assert.eq(1, configDB.chunks.find().count(), "Chunks count no split");
+ var chunkDoc = configDB.chunks.findOne();
+ assert.eq(0, bsonWoCompare(chunkDoc.min, keys.min), "Chunks min");
+ assert.eq(0, bsonWoCompare(chunkDoc.max, keys.max), "Chunks max");
+ } else {
+ assert.eq(2, configDB.chunks.find().count(), "Chunks count split");
+ }
}
-}
-// Tests
-// - name: Name of test, used in collection name
-// - key: key to test
-// - keyFieldSize: size of each key field
-// - expectFail: true/false, true if key is too large to pre-split
-var tests = [
- {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
- {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
- {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
- {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
- {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
-];
+ // Tests
+ // - name: Name of test, used in collection name
+ // - key: key to test
+ // - keyFieldSize: size of each key field
+ // - expectFail: true/false, true if key is too large to pre-split
+ var tests = [
+ {name: "Key size small", key: {x: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Key size 512", key: {x: 1}, keyFieldSize: 512, expectFail: true},
+ {name: "Key size 2000", key: {x: 1}, keyFieldSize: 2000, expectFail: true},
+ {name: "Compound key size small", key: {x: 1, y: 1}, keyFieldSize: 100, expectFail: false},
+ {name: "Compound key size 512", key: {x: 1, y: 1}, keyFieldSize: 256, expectFail: true},
+ {name: "Compound key size 10000", key: {x: 1, y: 1}, keyFieldSize: 5000, expectFail: true},
+ ];
-var st = new ShardingTest({ shards: 1 });
-var configDB = st.s.getDB('config');
+ var st = new ShardingTest({shards: 1});
+ var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' }));
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
-tests.forEach(function(test){
- var collName = "split_large_key_" + test.name;
- var midKey = {};
- var chunkKeys = {min: {}, max: {}};
- for (var k in test.key) {
- // new Array with join creates string length 1 less than size, so add 1
- midKey[k] = new Array(test.keyFieldSize+1).join('a');
- // min & max keys for each field in the index
- chunkKeys.min[k] = MinKey;
- chunkKeys.max[k] = MaxKey;
- }
+ tests.forEach(function(test) {
+ var collName = "split_large_key_" + test.name;
+ var midKey = {};
+ var chunkKeys = {
+ min: {},
+ max: {}
+ };
+ for (var k in test.key) {
+ // new Array with join creates string length 1 less than size, so add 1
+ midKey[k] = new Array(test.keyFieldSize + 1).join('a');
+ // min & max keys for each field in the index
+ chunkKeys.min[k] = MinKey;
+ chunkKeys.max[k] = MaxKey;
+ }
- assert.commandWorked(
- configDB.adminCommand({ shardCollection: "test." + collName, key: test.key }));
+ assert.commandWorked(
+ configDB.adminCommand({shardCollection: "test." + collName, key: test.key}));
- var res = configDB.adminCommand({ split: "test."+collName, middle: midKey});
- if (test.expectFail) {
- assert(!res.ok, "Split: " + collName);
- assert(res.errmsg !== null, "Split errmsg: " + collName);
- } else {
- assert(res.ok, "Split: " + collName + " " + res.errmsg);
- }
+ var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
+ if (test.expectFail) {
+ assert(!res.ok, "Split: " + collName);
+ assert(res.errmsg !== null, "Split errmsg: " + collName);
+ } else {
+ assert(res.ok, "Split: " + collName + " " + res.errmsg);
+ }
- verifyChunk(chunkKeys, test.expectFail);
+ verifyChunk(chunkKeys, test.expectFail);
- st.s0.getCollection("test." + collName).drop();
-});
+ st.s0.getCollection("test." + collName).drop();
+ });
-st.stop();
+ st.stop();
})();
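The limit exercised by split_large_key.js comes from the size of the individual key values, not the key pattern: a middle key whose string fields push the split point past the allowed size is refused, and the single original chunk must remain untouched. A sketch of how the oversized value is built (new Array(n + 1).join('a') yields a string of exactly n characters; the 512 here mirrors one of the test cases):

// Build a 512-character value for the split point.
var midKey = {x: new Array(512 + 1).join('a')};
assert.eq(512, midKey.x.length);
// Against a collection sharded on {x: 1}, the split is expected to be rejected:
//   var res = configDB.adminCommand({split: "test." + collName, middle: midKey});
//   assert(!res.ok, "oversized split key should be refused");
// and config.chunks must still contain only the original MinKey/MaxKey chunk.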
diff --git a/jstests/sharding/split_with_force.js b/jstests/sharding/split_with_force.js
index 117d17361e0..c66d2f145eb 100644
--- a/jstests/sharding/split_with_force.js
+++ b/jstests/sharding/split_with_force.js
@@ -2,60 +2,62 @@
// Tests autosplit locations with force : true
//
-var options = { chunkSize: 1, // MB
- mongosOptions : { noAutoSplit : "" }
- };
+var options = {
+ chunkSize: 1, // MB
+ mongosOptions: {noAutoSplit: ""}
+};
-var st = new ShardingTest({ shards : 1, mongos : 1, other : options });
+var st = new ShardingTest({shards: 1, mongos: 1, other: options});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var shardAdmin = st.shard0.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var shardAdmin = st.shard0.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-jsTest.log( "Insert a bunch of data into a chunk of the collection..." );
+jsTest.log("Insert a bunch of data into a chunk of the collection...");
var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < (250 * 1000) + 10; i++ ) {
- bulk.insert({ _id : i });
+for (var i = 0; i < (250 * 1000) + 10; i++) {
+ bulk.insert({_id: i});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Insert a bunch of data into the rest of the collection..." );
+jsTest.log("Insert a bunch of data into the rest of the collection...");
bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 1; i <= (250 * 1000); i++ ) {
- bulk.insert({ _id: -i });
+for (var i = 1; i <= (250 * 1000); i++) {
+ bulk.insert({_id: -i});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Get split points of the chunk using force : true..." );
+jsTest.log("Get split points of the chunk using force : true...");
var maxChunkSizeBytes = 1024 * 1024;
-var splitKeys = shardAdmin.runCommand({ splitVector : coll + "",
- keyPattern : { _id : 1 },
- min : { _id : 0 },
- max : { _id : MaxKey },
- force : true
- }).splitKeys;
+var splitKeys = shardAdmin.runCommand({
+ splitVector: coll + "",
+ keyPattern: {_id: 1},
+ min: {_id: 0},
+ max: {_id: MaxKey},
+ force: true
+}).splitKeys;
-printjson( splitKeys );
-printjson( coll.stats() );
+printjson(splitKeys);
+printjson(coll.stats());
st.printShardingStatus();
-jsTest.log( "Make sure our split is approximately in half..." );
+jsTest.log("Make sure our split is approximately in half...");
-assert.eq( splitKeys.length, 1 );
+assert.eq(splitKeys.length, 1);
var splitKey = splitKeys[0]._id;
-assert.gt( splitKey, ((250 * 1000) / 2) - 50 );
-assert.lt( splitKey, ((250 * 1000) / 2) + 50 );
+assert.gt(splitKey, ((250 * 1000) / 2) - 50);
+assert.lt(splitKey, ((250 * 1000) / 2) + 50);
st.stop();
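split_with_force.js relies on splitVector's force mode: rather than proposing split points based on the configured chunk size, the shard returns a single key near the median of the requested range. A compact restatement of that call, assuming the coll and shardAdmin handles and the ~250,000 documents with _id >= 0 inserted above (the numeric bounds mirror the test's own assertion):

var res = shardAdmin.runCommand({
    splitVector: coll + "",      // namespace of the sharded collection
    keyPattern: {_id: 1},
    min: {_id: 0},
    max: {_id: MaxKey},
    force: true
});
assert.eq(1, res.splitKeys.length);            // exactly one suggested point
var splitKey = res.splitKeys[0]._id;
// The point should land near the middle of the 250,000 documents in the range.
assert.gt(splitKey, (250 * 1000) / 2 - 50);
assert.lt(splitKey, (250 * 1000) / 2 + 50);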
diff --git a/jstests/sharding/split_with_force_small.js b/jstests/sharding/split_with_force_small.js
index 86fb4667132..ad14f8642cb 100644
--- a/jstests/sharding/split_with_force_small.js
+++ b/jstests/sharding/split_with_force_small.js
@@ -2,68 +2,70 @@
// Tests autosplit locations with force : true, for small collections
//
-var options = { chunkSize: 1, // MB
- mongosOptions : { noAutoSplit : "" }
- };
+var options = {
+ chunkSize: 1, // MB
+ mongosOptions: {noAutoSplit: ""}
+};
-var st = new ShardingTest({ shards : 1, mongos : 1, other : options });
+var st = new ShardingTest({shards: 1, mongos: 1, other: options});
st.stopBalancer();
var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var config = mongos.getDB( "config" );
-var shardAdmin = st.shard0.getDB( "admin" );
-var coll = mongos.getCollection( "foo.bar" );
+var admin = mongos.getDB("admin");
+var config = mongos.getDB("config");
+var shardAdmin = st.shard0.getDB("admin");
+var coll = mongos.getCollection("foo.bar");
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-assert( admin.runCommand({ shardCollection : coll + "", key : { _id : 1 } }).ok );
-assert( admin.runCommand({ split : coll + "", middle : { _id : 0 } }).ok );
+assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+assert(admin.runCommand({shardCollection: coll + "", key: {_id: 1}}).ok);
+assert(admin.runCommand({split: coll + "", middle: {_id: 0}}).ok);
-jsTest.log( "Insert a bunch of data into the low chunk of a collection," +
- " to prevent relying on stats." );
+jsTest.log("Insert a bunch of data into the low chunk of a collection," +
+ " to prevent relying on stats.");
var data128k = "x";
-for ( var i = 0; i < 7; i++ ) data128k += data128k;
+for (var i = 0; i < 7; i++)
+ data128k += data128k;
var bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < 1024; i++ ) {
- bulk.insert({ _id : -(i + 1) });
+for (var i = 0; i < 1024; i++) {
+ bulk.insert({_id: -(i + 1)});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Insert 32 docs into the high chunk of a collection" );
+jsTest.log("Insert 32 docs into the high chunk of a collection");
bulk = coll.initializeUnorderedBulkOp();
-for ( var i = 0; i < 32; i++ ) {
- bulk.insert({ _id : i });
+for (var i = 0; i < 32; i++) {
+ bulk.insert({_id: i});
}
assert.writeOK(bulk.execute());
-jsTest.log( "Split off MaxKey chunk..." );
+jsTest.log("Split off MaxKey chunk...");
-assert( admin.runCommand({ split : coll + "", middle : { _id : 32 } }).ok );
+assert(admin.runCommand({split: coll + "", middle: {_id: 32}}).ok);
-jsTest.log( "Keep splitting chunk multiple times..." );
+jsTest.log("Keep splitting chunk multiple times...");
st.printShardingStatus();
-for ( var i = 0; i < 5; i++ ) {
- assert( admin.runCommand({ split : coll + "", find : { _id : 0 } }).ok );
+for (var i = 0; i < 5; i++) {
+ assert(admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
st.printShardingStatus();
}
// Make sure we can't split further than 5 (2^5) times
-assert( !admin.runCommand({ split : coll + "", find : { _id : 0 } }).ok );
+assert(!admin.runCommand({split: coll + "", find: {_id: 0}}).ok);
-var chunks = config.chunks.find({ 'min._id' : { $gte : 0, $lt : 32 } }).sort({ min : 1 }).toArray();
-printjson( chunks );
+var chunks = config.chunks.find({'min._id': {$gte: 0, $lt: 32}}).sort({min: 1}).toArray();
+printjson(chunks);
// Make sure the chunks grow by 2x (except the first)
var nextSize = 1;
-for ( var i = 0; i < chunks.size; i++ ) {
- assert.eq( coll.count({ _id : { $gte : chunks[i].min._id, $lt : chunks[i].max._id } }),
- nextSize );
- if ( i != 0 ) nextSize += nextSize;
+for (var i = 0; i < chunks.size; i++) {
+ assert.eq(coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}), nextSize);
+ if (i != 0)
+ nextSize += nextSize;
}
st.stop();
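Two details of split_with_force_small.js are easy to miss. First, seven doublings of a one-character string produce 128 characters (2^7), despite the data128k name. Second, the final verification loop iterates up to chunks.size, which is not a property of a plain shell array (arrays expose length), so as written the per-chunk count check is likely a no-op; the presumably intended check would look like the sketch below (chunks, coll, and the 32 documents with _id in [0, 32) come from the test above):

// Walking the chunks upward from {_id: 0}, each chunk after the first should
// hold twice as many documents as the previous one: 1, 1, 2, 4, 8, 16.
var nextSize = 1;
for (var i = 0; i < chunks.length; i++) {
    assert.eq(nextSize, coll.count({_id: {$gte: chunks[i].min._id, $lt: chunks[i].max._id}}));
    if (i != 0)
        nextSize += nextSize;
}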
diff --git a/jstests/sharding/ssv_config_check.js b/jstests/sharding/ssv_config_check.js
index d1a1598b63f..edeb559d40d 100644
--- a/jstests/sharding/ssv_config_check.js
+++ b/jstests/sharding/ssv_config_check.js
@@ -3,70 +3,70 @@
* replica set name, but with a member list that is not strictly the same.
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 1 });
+ var st = new ShardingTest({shards: 1});
-var testDB = st.s.getDB('test');
-testDB.adminCommand({ enableSharding: 'test' });
-testDB.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
+ var testDB = st.s.getDB('test');
+ testDB.adminCommand({enableSharding: 'test'});
+ testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}});
-// Initialize version on shard.
-testDB.user.insert({ x: 1 });
+ // Initialize version on shard.
+ testDB.user.insert({x: 1});
-var directConn = new Mongo(st.d0.host);
-var adminDB = directConn.getDB('admin');
+ var directConn = new Mongo(st.d0.host);
+ var adminDB = directConn.getDB('admin');
-var configStr = adminDB.runCommand({ getShardVersion: 'test.user' }).configServer;
-var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
+ var configStr = adminDB.runCommand({getShardVersion: 'test.user'}).configServer;
+ var alternateConfigStr = configStr.substring(0, configStr.lastIndexOf(','));
-var shardDoc = st.s.getDB('config').shards.findOne();
+ var shardDoc = st.s.getDB('config').shards.findOne();
-assert.commandWorked(adminDB.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: alternateConfigStr,
- shard: shardDoc._id,
- shardHost: shardDoc.host
-}));
+ assert.commandWorked(adminDB.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: alternateConfigStr,
+ shard: shardDoc._id,
+ shardHost: shardDoc.host
+ }));
-assert.commandFailed(adminDB.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: 'bad-rs/local:12,local:34',
- shard: shardDoc._id,
- shardHost: shardDoc.host
-}));
+ assert.commandFailed(adminDB.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: 'bad-rs/local:12,local:34',
+ shard: shardDoc._id,
+ shardHost: shardDoc.host
+ }));
-var configAdmin = st.c0.getDB('admin');
-// Initialize internal config string.
-assert.commandWorked(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: configStr,
- shard: 'config'
-}));
+ var configAdmin = st.c0.getDB('admin');
+ // Initialize internal config string.
+ assert.commandWorked(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: configStr,
+ shard: 'config'
+ }));
-// Passing configdb that does not match initialized value is not ok.
-assert.commandFailed(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: 'bad-rs/local:12,local:34',
- shard: 'config'
-}));
+ // Passing configdb that does not match initialized value is not ok.
+ assert.commandFailed(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: 'bad-rs/local:12,local:34',
+ shard: 'config'
+ }));
-// Passing configdb that matches initialized value is ok.
-assert.commandWorked(configAdmin.runCommand({
- setShardVersion: '',
- init: true,
- authoritative: true,
- configdb: alternateConfigStr,
- shard: 'config'
-}));
+ // Passing configdb that matches initialized value is ok.
+ assert.commandWorked(configAdmin.runCommand({
+ setShardVersion: '',
+ init: true,
+ authoritative: true,
+ configdb: alternateConfigStr,
+ shard: 'config'
+ }));
-st.stop();
+ st.stop();
})();
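The point of ssv_config_check.js is that the setShardVersion init handshake validates the config connection string by replica set identity rather than by exact member list: alternateConfigStr is configStr with its last member dropped and is still accepted, while a string naming a different set is rejected. Restating the accepted/rejected pair against the shard (adminDB and shardDoc as set up above):

// Same config replica set name, shorter member list: accepted.
assert.commandWorked(adminDB.runCommand({
    setShardVersion: '',
    init: true,
    authoritative: true,
    configdb: alternateConfigStr,
    shard: shardDoc._id,
    shardHost: shardDoc.host
}));
// Different replica set name: rejected, even though the host list is well formed.
assert.commandFailed(adminDB.runCommand({
    setShardVersion: '',
    init: true,
    authoritative: true,
    configdb: 'bad-rs/local:12,local:34',
    shard: shardDoc._id,
    shardHost: shardDoc.host
}));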
diff --git a/jstests/sharding/stale_mongos_updates_and_removes.js b/jstests/sharding/stale_mongos_updates_and_removes.js
index 193cfcb83bf..17f75b4c986 100644
--- a/jstests/sharding/stale_mongos_updates_and_removes.js
+++ b/jstests/sharding/stale_mongos_updates_and_removes.js
@@ -17,10 +17,10 @@
var resetCollection = function() {
assert(staleMongos.getCollection(collNS).drop());
st.ensurePrimaryShard(dbName, st._shardNames[0]);
- assert.commandWorked(staleMongos.adminCommand({ shardCollection: collNS, key: { x: 1 }}));
- for (var i=0; i<numShardKeys; i++) {
- assert.writeOK(staleMongos.getCollection(collNS).insert({ x: i, fieldToUpdate: 0 }));
- assert.writeOK(staleMongos.getCollection(collNS).insert({ x: i, fieldToUpdate: 0 }));
+ assert.commandWorked(staleMongos.adminCommand({shardCollection: collNS, key: {x: 1}}));
+ for (var i = 0; i < numShardKeys; i++) {
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
+ assert.writeOK(staleMongos.getCollection(collNS).insert({x: i, fieldToUpdate: 0}));
}
};
@@ -37,22 +37,18 @@ var makeStaleMongosTargetMultipleShards = function() {
resetCollection();
// Make sure staleMongos sees all data on first shard.
- var chunk = staleMongos.getCollection("config.chunks").findOne({ min: { x: MinKey },
- max: { x: MaxKey }});
+ var chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
assert(chunk.shard === st._shardNames[0]);
// Make sure staleMongos sees two chunks on two different shards.
- assert.commandWorked(staleMongos.adminCommand({ split: collNS, middle: { x: splitPoint }}));
- assert.commandWorked(staleMongos.adminCommand({ moveChunk: collNS,
- find: { x: 0 },
- to: st._shardNames[1],
- _waitForDelete: true }));
+ assert.commandWorked(staleMongos.adminCommand({split: collNS, middle: {x: splitPoint}}));
+ assert.commandWorked(staleMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st._shardNames[1], _waitForDelete: true}));
// Use freshMongos to consolidate the chunks on one shard.
- assert.commandWorked(freshMongos.adminCommand({ moveChunk: collNS,
- find: { x: 0 },
- to: st._shardNames[0],
- _waitForDelete: true }));
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st._shardNames[0], _waitForDelete: true}));
};
// Create a new sharded collection and move a chunk from one shard to another. In the end,
@@ -65,20 +61,22 @@ var makeStaleMongosTargetMultipleShards = function() {
var makeStaleMongosTargetSingleShard = function() {
resetCollection();
// Make sure staleMongos sees all data on first shard.
- var chunk = staleMongos.getCollection("config.chunks").findOne({ min: { x: MinKey },
- max: { x: MaxKey }});
+ var chunk =
+ staleMongos.getCollection("config.chunks").findOne({min: {x: MinKey}, max: {x: MaxKey}});
assert(chunk.shard === st._shardNames[0]);
// Use freshMongos to move chunk to another shard.
- assert.commandWorked(freshMongos.adminCommand({ moveChunk: collNS,
- find: { x: 0 },
- to: st._shardNames[1],
- _waitForDelete: true }));
+ assert.commandWorked(freshMongos.adminCommand(
+ {moveChunk: collNS, find: {x: 0}, to: st._shardNames[1], _waitForDelete: true}));
};
var checkAllRemoveQueries = function(makeMongosStaleFunc) {
- var multi = { justOne: false };
- var single = { justOne: true };
+ var multi = {
+ justOne: false
+ };
+ var single = {
+ justOne: true
+ };
var doRemove = function(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -94,7 +92,7 @@ var checkAllRemoveQueries = function(makeMongosStaleFunc) {
var checkRemoveIsInvalid = function(query, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
- var res = staleMongos.getCollection(collNS).remove(query, multiOption);
+ var res = staleMongos.getCollection(collNS).remove(query, multiOption);
assert.writeError(res);
};
@@ -116,12 +114,23 @@ var checkAllRemoveQueries = function(makeMongosStaleFunc) {
};
var checkAllUpdateQueries = function(makeMongosStaleFunc) {
- var oUpdate = { $inc: { fieldToUpdate: 1 }}; // op-style update (non-idempotent)
- var rUpdate = { x: 0, fieldToUpdate: 1 }; // replacement-style update (idempotent)
- var queryAfterUpdate = { fieldToUpdate: 1 };
+ var oUpdate = {
+ $inc: {fieldToUpdate: 1}
+ }; // op-style update (non-idempotent)
+ var rUpdate = {
+ x: 0,
+ fieldToUpdate: 1
+ }; // replacement-style update (idempotent)
+ var queryAfterUpdate = {
+ fieldToUpdate: 1
+ };
- var multi = { multi: true };
- var single = { multi: false };
+ var multi = {
+ multi: true
+ };
+ var single = {
+ multi: false
+ };
var doUpdate = function(query, update, multiOption, makeMongosStaleFunc) {
makeMongosStaleFunc();
@@ -143,7 +152,7 @@ var checkAllUpdateQueries = function(makeMongosStaleFunc) {
};
// This update has inconsistent behavior as explained in SERVER-22895.
- //doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
+ // doUpdate(emptyQuery, rUpdate, single, makeMongosStaleFunc);
// Not possible because replacement-style requires equality match on shard key.
checkUpdateIsInvalid(emptyQuery, rUpdate, multi, makeMongosStaleFunc);
// Not possible because op-style requires equality match on shard key if single update.
@@ -172,28 +181,34 @@ var checkAllUpdateQueries = function(makeMongosStaleFunc) {
doUpdate(multiPointQuery, oUpdate, multi, makeMongosStaleFunc);
};
-var st = new ShardingTest({shards: 2, mongos: 2, other: { mongosOptions: { noAutoSplit: "" }} });
+var st = new ShardingTest({shards: 2, mongos: 2, other: {mongosOptions: {noAutoSplit: ""}}});
var dbName = 'test';
var collNS = dbName + '.foo';
var numShardKeys = 10;
var numDocs = numShardKeys * 2;
var splitPoint = numShardKeys / 2;
-assert.commandWorked(st.s.adminCommand({ enableSharding: dbName }));
-assert.commandWorked(st.s.adminCommand({ shardCollection: collNS, key: { x: 1 }}));
+assert.commandWorked(st.s.adminCommand({enableSharding: dbName}));
+assert.commandWorked(st.s.adminCommand({shardCollection: collNS, key: {x: 1}}));
var freshMongos = st.s0;
var staleMongos = st.s1;
var emptyQuery = {};
-var pointQuery = { x: 0 };
+var pointQuery = {
+ x: 0
+};
// Choose a range that would fall on only one shard.
// Use (splitPoint - 1) because of SERVER-20768.
-var rangeQuery = { x: { $gte: 0, $lt: splitPoint - 1 }};
+var rangeQuery = {
+ x: {$gte: 0, $lt: splitPoint - 1}
+};
// Choose points that would fall on two different shards.
-var multiPointQuery = { $or: [{ x: 0 }, { x: numShardKeys }]};
+var multiPointQuery = {
+ $or: [{x: 0}, {x: numShardKeys}]
+};
checkAllRemoveQueries(makeStaleMongosTargetSingleShard);
checkAllRemoveQueries(makeStaleMongosTargetMultipleShards);
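The three query shapes above are what make the stale-routing checks meaningful: each one forces a different targeting decision by the stale mongos before its routing table is refreshed. A short sketch of the shapes and the check pattern, assuming the test's splitPoint and numShardKeys variables:

var pointQuery = {x: 0};                                   // equality on the shard key: one shard
var rangeQuery = {x: {$gte: 0, $lt: splitPoint - 1}};      // range inside one chunk: one shard
var multiPointQuery = {$or: [{x: 0}, {x: numShardKeys}]};  // points in different chunks: both shards
// Each check first runs a makeMongosStale* function so staleMongos routes on
// outdated chunk info, then issues the update or remove through it and checks
// the outcome (writes that cannot target by shard key are expected to error).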
diff --git a/jstests/sharding/stale_version_write.js b/jstests/sharding/stale_version_write.js
index 21680f1abee..e5885dcfa41 100644
--- a/jstests/sharding/stale_version_write.js
+++ b/jstests/sharding/stale_version_write.js
@@ -1,37 +1,37 @@
// Tests whether a reset sharding version triggers errors
-jsTest.log( "Starting sharded cluster..." );
+jsTest.log("Starting sharded cluster...");
-var st = new ShardingTest( { shards : 1, mongos : 2, verbose : 2 } );
+var st = new ShardingTest({shards: 1, mongos: 2, verbose: 2});
st.stopBalancer();
var mongosA = st.s0;
var mongosB = st.s1;
-jsTest.log( "Adding new collections...");
+jsTest.log("Adding new collections...");
-var collA = mongosA.getCollection( jsTestName() + ".coll" );
-assert.writeOK(collA.insert({ hello : "world" }));
+var collA = mongosA.getCollection(jsTestName() + ".coll");
+assert.writeOK(collA.insert({hello: "world"}));
-var collB = mongosB.getCollection( "" + collA );
-assert.writeOK(collB.insert({ hello : "world" }));
+var collB = mongosB.getCollection("" + collA);
+assert.writeOK(collB.insert({hello: "world"}));
-jsTest.log( "Enabling sharding..." );
+jsTest.log("Enabling sharding...");
-printjson( mongosA.getDB( "admin" ).runCommand({ enableSharding : "" + collA.getDB() }) );
-printjson( mongosA.getDB( "admin" ).runCommand({ shardCollection : "" + collA, key : { _id : 1 } }) );
+printjson(mongosA.getDB("admin").runCommand({enableSharding: "" + collA.getDB()}));
+printjson(mongosA.getDB("admin").runCommand({shardCollection: "" + collA, key: {_id: 1}}));
// MongoD doesn't know about the config shard version *until* MongoS tells it
collA.findOne();
-jsTest.log( "Trigger shard version mismatch..." );
+jsTest.log("Trigger shard version mismatch...");
-assert.writeOK(collB.insert({ goodbye : "world" }));
+assert.writeOK(collB.insert({goodbye: "world"}));
-print( "Inserted..." );
+print("Inserted...");
-assert.eq( 3, collA.find().itcount() );
-assert.eq( 3, collB.find().itcount() );
+assert.eq(3, collA.find().itcount());
+assert.eq(3, collB.find().itcount());
st.stop();
diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js
index f88f128a75a..61bff427580 100644
--- a/jstests/sharding/startup_with_all_configs_down.js
+++ b/jstests/sharding/startup_with_all_configs_down.js
@@ -6,68 +6,71 @@
// A restarted standalone will lose all data when using an ephemeral storage engine.
// @tags: [requires_persistence]
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({shards: 2});
+ var st = new ShardingTest({shards: 2});
-jsTestLog("Setting up initial data");
+ jsTestLog("Setting up initial data");
-for (var i = 0; i < 100; i++) {
- assert.writeOK(st.s.getDB('test').foo.insert({_id:i}));
-}
+ for (var i = 0; i < 100; i++) {
+ assert.writeOK(st.s.getDB('test').foo.insert({_id: i}));
+ }
-st.ensurePrimaryShard('test', 'shard0000');
+ st.ensurePrimaryShard('test', 'shard0000');
-st.adminCommand({enableSharding: 'test'});
-st.adminCommand({shardCollection: 'test.foo', key: {_id: 1}});
-st.adminCommand({split: 'test.foo', find: {_id: 50}});
-st.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: 'shard0001'});
+ st.adminCommand({enableSharding: 'test'});
+ st.adminCommand({shardCollection: 'test.foo', key: {_id: 1}});
+ st.adminCommand({split: 'test.foo', find: {_id: 50}});
+ st.adminCommand({moveChunk: 'test.foo', find: {_id: 75}, to: 'shard0001'});
-// Make sure the pre-existing mongos already has the routing information loaded into memory
-assert.eq(100, st.s.getDB('test').foo.find().itcount());
+ // Make sure the pre-existing mongos already has the routing information loaded into memory
+ assert.eq(100, st.s.getDB('test').foo.find().itcount());
-jsTestLog("Shutting down all config servers");
-for (var i = 0; i < st._configServers.length; i++) {
- st.stopConfigServer(i);
-}
+ jsTestLog("Shutting down all config servers");
+ for (var i = 0; i < st._configServers.length; i++) {
+ st.stopConfigServer(i);
+ }
-jsTestLog("Starting a new mongos when there are no config servers up");
-var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
-// The new mongos won't accept any new connections, but it should stay up and continue trying
-// to contact the config servers to finish startup.
-assert.throws(function() { new Mongo(newMongosInfo.host); });
+ jsTestLog("Starting a new mongos when there are no config servers up");
+ var newMongosInfo = MongoRunner.runMongos({configdb: st._configDB, waitForConnect: false});
+ // The new mongos won't accept any new connections, but it should stay up and continue trying
+ // to contact the config servers to finish startup.
+ assert.throws(function() {
+ new Mongo(newMongosInfo.host);
+ });
+ jsTestLog("Restarting a shard while there are no config servers up");
+ MongoRunner.stopMongod(st.shard1);
+ st.shard1.restart = true;
+ MongoRunner.runMongod(st.shard1);
-jsTestLog("Restarting a shard while there are no config servers up");
-MongoRunner.stopMongod(st.shard1);
-st.shard1.restart = true;
-MongoRunner.runMongod(st.shard1);
+ jsTestLog("Queries should fail because the shard can't initialize sharding state");
+ var error = assert.throws(function() {
+ st.s.getDB('test').foo.find().itcount();
+ });
+ assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
-jsTestLog("Queries should fail because the shard can't initialize sharding state");
-var error = assert.throws(function() {st.s.getDB('test').foo.find().itcount();});
-assert.eq(ErrorCodes.ExceededTimeLimit, error.code);
+ jsTestLog("Restarting the config servers");
+ for (var i = 0; i < st._configServers.length; i++) {
+ st.restartConfigServer(i);
+ }
-jsTestLog("Restarting the config servers");
-for (var i = 0; i < st._configServers.length; i++) {
- st.restartConfigServer(i);
-}
+ jsTestLog("Queries against the original mongos should work again");
+ assert.eq(100, st.s.getDB('test').foo.find().itcount());
-jsTestLog("Queries against the original mongos should work again");
-assert.eq(100, st.s.getDB('test').foo.find().itcount());
+ jsTestLog("Should now be possible to connect to the mongos that was started while the config " +
+ "servers were down");
+ var mongos2 = null;
+ assert.soon(function() {
+ try {
+ mongos2 = new Mongo(newMongosInfo.host);
+ return true;
+ } catch (e) {
+ printjson(e);
+ return false;
+ }
+ });
+ assert.eq(100, mongos2.getDB('test').foo.find().itcount());
-jsTestLog("Should now be possible to connect to the mongos that was started while the config "
- + "servers were down");
-var mongos2 = null;
-assert.soon(function() {
- try {
- mongos2 = new Mongo(newMongosInfo.host);
- return true;
- } catch (e) {
- printjson(e);
- return false;
- }
- });
-assert.eq(100, mongos2.getDB('test').foo.find().itcount());
-
-st.stop();
+ st.stop();
}());
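Two shell idioms carry the logic of startup_with_all_configs_down.js: assert.throws captures the error object so its code can be compared against ErrorCodes.ExceededTimeLimit while the shard cannot initialize sharding state, and assert.soon retries a connection attempt until the deferred mongos startup completes. A sketch of the retry idiom, assuming newMongosInfo from the run above:

// Keep attempting to connect; return true only once the connection succeeds.
var mongos2 = null;
assert.soon(function() {
    try {
        mongos2 = new Mongo(newMongosInfo.host);
        return true;
    } catch (e) {
        printjson(e);   // connection refused until startup finishes
        return false;
    }
});
assert.eq(100, mongos2.getDB('test').foo.find().itcount());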
diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js
index ba7221cbe47..da6d842fb99 100644
--- a/jstests/sharding/stats.js
+++ b/jstests/sharding/stats.js
@@ -1,196 +1,207 @@
-(function () {
-
-var s = new ShardingTest({ name: "stats", shards: 2, mongos: 1 });
-
-s.adminCommand( { enablesharding : "test" } );
-
-a = s._connections[0].getDB( "test" );
-b = s._connections[1].getDB( "test" );
-
-db = s.getDB( "test" );
-s.ensurePrimaryShard('test', 'shard0001');
-
-function numKeys(o){
- var num = 0;
- for (var x in o)
- num++;
- return num;
-}
-
-db.foo.drop();
-assert.commandFailed(db.foo.stats(),
- 'db.collection.stats() should fail on non-existent collection');
-
-// ---------- load some data -----
-
-// need collections sharded before and after main collection for proper test
-s.adminCommand( { shardcollection : "test.aaa" , key : { _id : 1 } } );
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } ); // this collection is actually used
-s.adminCommand( { shardcollection : "test.zzz" , key : { _id : 1 } } );
-
-
-N = 10000;
-s.adminCommand( { split : "test.foo" , middle : { _id : N/2 } } );
-s.adminCommand({ moveChunk: "test.foo", find: { _id: 3 },
- to: s.getNonPrimaries("test")[0], _waitForDelete: true });
-
-var bulk = db.foo.initializeUnorderedBulkOp();
-for ( i=0; i<N; i++ )
- bulk.insert( { _id : i } );
-assert.writeOK(bulk.execute());
-
-x = db.foo.stats();
-assert.eq( N , x.count , "coll total count expected" );
-assert.eq( db.foo.count() , x.count , "coll total count match" );
-assert.eq( 2 , x.nchunks , "coll chunk num" );
-assert.eq( 2 , numKeys(x.shards) , "coll shard num" );
-assert.eq( N / 2 , x.shards.shard0000.count , "coll count on shard0000 expected" );
-assert.eq( N / 2 , x.shards.shard0001.count , "coll count on shard0001 expected" );
-assert.eq( a.foo.count() , x.shards.shard0000.count , "coll count on shard0000 match" );
-assert.eq( b.foo.count() , x.shards.shard0001.count , "coll count on shard0001 match" );
-assert(!x.shards.shard0000.indexDetails,
- 'indexDetails should not be present in shard0000: ' + tojson(x.shards.shard0000));
-assert(!x.shards.shard0001.indexDetails,
- 'indexDetails should not be present in shard0001: ' + tojson(x.shards.shard0001));
-
-
-a_extras = a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes
-b_extras = b.stats().objects - b.foo.count(); // things like system.namespaces and system.indexes
-print("a_extras: " + a_extras);
-print("b_extras: " + b_extras);
-
-x = db.stats();
-
-//dbstats uses Future::CommandResult so raw output uses connection strings not shard names
-shards = Object.keySet(x.raw);
-
-assert.eq( N + (a_extras + b_extras) , x.objects , "db total count expected" );
-assert.eq( 2 , numKeys(x.raw) , "db shard num" );
-assert.eq( (N / 2) + a_extras, x.raw[shards[0]].objects , "db count on shard0000 expected" );
-assert.eq( (N / 2) + b_extras, x.raw[shards[1]].objects , "db count on shard0001 expected" );
-assert.eq( a.stats().objects , x.raw[shards[0]].objects , "db count on shard0000 match" );
-assert.eq( b.stats().objects , x.raw[shards[1]].objects , "db count on shard0001 match" );
-
-/* Test db.stat() and db.collection.stat() scaling */
-
-/* Helper functions */
-function statComp(stat, stat_scaled, scale) {
- /* Because of loss of floating point precision, do not check exact equality */
- if ( stat == stat_scaled )
- return true;
- assert(((stat_scaled - 2) <= (stat / scale)) &&
- ((stat / scale) <= (stat_scaled + 2)));
-}
-
-function dbStatComp(stat_obj, stat_obj_scaled, scale) {
- statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
- statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
- /* avgObjSize not scaled. See SERVER-7347 */
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
-}
-
-function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
- statComp(stat_obj.size, stat_obj_scaled.size, scale);
- statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
- statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
- statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, scale);
- /* lastExtentSize doesn't exist in mongos level collection stats */
- if (!mongos) {
- statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
- }
-}
+(function() {
-/* db.stats() tests */
-db_not_scaled = db.stats();
-db_scaled_512 = db.stats(512);
-db_scaled_1024 = db.stats(1024);
+ var s = new ShardingTest({name: "stats", shards: 2, mongos: 1});
-for (var shard in db_not_scaled.raw) {
- dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
- dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
-}
+ s.adminCommand({enablesharding: "test"});
-dbStatComp(db_not_scaled, db_scaled_512, 512);
-dbStatComp(db_not_scaled, db_scaled_1024, 1024);
+ a = s._connections[0].getDB("test");
+ b = s._connections[1].getDB("test");
-/* db.collection.stats() tests */
-coll_not_scaled = db.foo.stats();
-coll_scaled_512 = db.foo.stats(512);
-coll_scaled_1024 = db.foo.stats(1024);
+ db = s.getDB("test");
+ s.ensurePrimaryShard('test', 'shard0001');
-for (var shard in coll_not_scaled.shards) {
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
- collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
-}
+ function numKeys(o) {
+ var num = 0;
+ for (var x in o)
+ num++;
+ return num;
+ }
-collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
-collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
+ db.foo.drop();
+ assert.commandFailed(db.foo.stats(),
+ 'db.collection.stats() should fail on non-existent collection');
+
+ // ---------- load some data -----
+
+ // need collections sharded before and after main collection for proper test
+ s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}});
+ s.adminCommand(
+ {shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used
+ s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}});
+
+ N = 10000;
+ s.adminCommand({split: "test.foo", middle: {_id: N / 2}});
+ s.adminCommand({
+ moveChunk: "test.foo",
+ find: {_id: 3},
+ to: s.getNonPrimaries("test")[0],
+ _waitForDelete: true
+ });
+
+ var bulk = db.foo.initializeUnorderedBulkOp();
+ for (i = 0; i < N; i++)
+ bulk.insert({_id: i});
+ assert.writeOK(bulk.execute());
+
+ x = db.foo.stats();
+ assert.eq(N, x.count, "coll total count expected");
+ assert.eq(db.foo.count(), x.count, "coll total count match");
+ assert.eq(2, x.nchunks, "coll chunk num");
+ assert.eq(2, numKeys(x.shards), "coll shard num");
+ assert.eq(N / 2, x.shards.shard0000.count, "coll count on shard0000 expected");
+ assert.eq(N / 2, x.shards.shard0001.count, "coll count on shard0001 expected");
+ assert.eq(a.foo.count(), x.shards.shard0000.count, "coll count on shard0000 match");
+ assert.eq(b.foo.count(), x.shards.shard0001.count, "coll count on shard0001 match");
+ assert(!x.shards.shard0000.indexDetails,
+ 'indexDetails should not be present in shard0000: ' + tojson(x.shards.shard0000));
+ assert(!x.shards.shard0001.indexDetails,
+ 'indexDetails should not be present in shard0001: ' + tojson(x.shards.shard0001));
+
+ a_extras =
+ a.stats().objects - a.foo.count(); // things like system.namespaces and system.indexes
+ b_extras =
+ b.stats().objects - b.foo.count(); // things like system.namespaces and system.indexes
+ print("a_extras: " + a_extras);
+ print("b_extras: " + b_extras);
+
+ x = db.stats();
+
+ // dbstats uses Future::CommandResult so raw output uses connection strings not shard names
+ shards = Object.keySet(x.raw);
+
+ assert.eq(N + (a_extras + b_extras), x.objects, "db total count expected");
+ assert.eq(2, numKeys(x.raw), "db shard num");
+ assert.eq((N / 2) + a_extras, x.raw[shards[0]].objects, "db count on shard0000 expected");
+ assert.eq((N / 2) + b_extras, x.raw[shards[1]].objects, "db count on shard0001 expected");
+ assert.eq(a.stats().objects, x.raw[shards[0]].objects, "db count on shard0000 match");
+ assert.eq(b.stats().objects, x.raw[shards[1]].objects, "db count on shard0001 match");
+
+ /* Test db.stat() and db.collection.stat() scaling */
+
+ /* Helper functions */
+ function statComp(stat, stat_scaled, scale) {
+ /* Because of loss of floating point precision, do not check exact equality */
+ if (stat == stat_scaled)
+ return true;
+ assert(((stat_scaled - 2) <= (stat / scale)) && ((stat / scale) <= (stat_scaled + 2)));
+ }
-/* db.collection.stats() - indexDetails tests */
-(function() {
- var t = db.foo;
-
- assert.commandWorked(t.ensureIndex({a: 1}));
- assert.eq(2, t.getIndexes().length);
-
- var isWiredTiger = (!jsTest.options().storageEngine
- || jsTest.options().storageEngine === "wiredTiger");
-
- var stats = assert.commandWorked(t.stats({indexDetails: true}));
- var shardName;
- var shardStats;
- for (shardName in stats.shards) {
- shardStats = stats.shards[shardName];
- assert(shardStats.indexDetails,
- 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
- if (isWiredTiger) {
- assert.eq(t.getIndexes().length, Object.keys(shardStats.indexDetails).length,
- 'incorrect number of entries in WiredTiger indexDetails: ' +
- tojson(shardStats));
+ function dbStatComp(stat_obj, stat_obj_scaled, scale) {
+ statComp(stat_obj.dataSize, stat_obj_scaled.dataSize, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.indexSize, stat_obj_scaled.indexSize, scale);
+ statComp(stat_obj.fileSize, stat_obj_scaled.fileSize, scale);
+ /* avgObjSize not scaled. See SERVER-7347 */
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, 1);
+ }
+
+ function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) {
+ statComp(stat_obj.size, stat_obj_scaled.size, scale);
+ statComp(stat_obj.storageSize, stat_obj_scaled.storageSize, scale);
+ statComp(stat_obj.totalIndexSize, stat_obj_scaled.totalIndexSize, scale);
+ statComp(stat_obj.avgObjSize, stat_obj_scaled.avgObjSize, scale);
+ /* lastExtentSize doesn't exist in mongos level collection stats */
+ if (!mongos) {
+ statComp(stat_obj.lastExtentSize, stat_obj_scaled.lastExtentSize, scale);
}
}
- function getIndexName(indexKey) {
- var indexes = t.getIndexes().filter(function(doc) {
- return friendlyEqual(doc.key, indexKey);
- });
- assert.eq(1, indexes.length, tojson(indexKey) + ' not found in getIndexes() result: ' +
- tojson(t.getIndexes()));
- return indexes[0].name;
+ /* db.stats() tests */
+ db_not_scaled = db.stats();
+ db_scaled_512 = db.stats(512);
+ db_scaled_1024 = db.stats(1024);
+
+ for (var shard in db_not_scaled.raw) {
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512);
+ dbStatComp(db_not_scaled.raw[shard], db_scaled_1024.raw[shard], 1024);
+ }
+
+ dbStatComp(db_not_scaled, db_scaled_512, 512);
+ dbStatComp(db_not_scaled, db_scaled_1024, 1024);
+
+ /* db.collection.stats() tests */
+ coll_not_scaled = db.foo.stats();
+ coll_scaled_512 = db.foo.stats(512);
+ coll_scaled_1024 = db.foo.stats(1024);
+
+ for (var shard in coll_not_scaled.shards) {
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false);
+ collStatComp(coll_not_scaled.shards[shard], coll_scaled_1024.shards[shard], 1024, false);
}
- function checkIndexDetails(options, indexName) {
- var stats = assert.commandWorked(t.stats(options));
+ collStatComp(coll_not_scaled, coll_scaled_512, 512, true);
+ collStatComp(coll_not_scaled, coll_scaled_1024, 1024, true);
+
+ /* db.collection.stats() - indexDetails tests */
+ (function() {
+ var t = db.foo;
+
+ assert.commandWorked(t.ensureIndex({a: 1}));
+ assert.eq(2, t.getIndexes().length);
+
+ var isWiredTiger =
+ (!jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger");
+
+ var stats = assert.commandWorked(t.stats({indexDetails: true}));
+ var shardName;
+ var shardStats;
for (shardName in stats.shards) {
shardStats = stats.shards[shardName];
assert(shardStats.indexDetails,
- 'indexDetails missing from db.collection.stats(' + tojson(options) +
- ').shards[' + shardName + '] result: ' + tojson(shardStats));
- // Currently, indexDetails is only supported with WiredTiger.
+ 'indexDetails missing for ' + shardName + ': ' + tojson(shardStats));
if (isWiredTiger) {
- assert.eq(1, Object.keys(shardStats.indexDetails).length,
- 'WiredTiger indexDetails must have exactly one entry');
- assert(shardStats.indexDetails[indexName],
- indexName + ' missing from WiredTiger indexDetails: ' +
- tojson(shardStats.indexDetails));
- assert.neq(0, Object.keys(shardStats.indexDetails[indexName]).length,
- indexName + ' exists in indexDetails but contains no information: ' +
- tojson(shardStats.indexDetails));
+ assert.eq(t.getIndexes().length,
+ Object.keys(shardStats.indexDetails).length,
+ 'incorrect number of entries in WiredTiger indexDetails: ' +
+ tojson(shardStats));
+ }
+ }
+
+ function getIndexName(indexKey) {
+ var indexes = t.getIndexes().filter(function(doc) {
+ return friendlyEqual(doc.key, indexKey);
+ });
+ assert.eq(
+ 1,
+ indexes.length,
+ tojson(indexKey) + ' not found in getIndexes() result: ' + tojson(t.getIndexes()));
+ return indexes[0].name;
+ }
+
+ function checkIndexDetails(options, indexName) {
+ var stats = assert.commandWorked(t.stats(options));
+ for (shardName in stats.shards) {
+ shardStats = stats.shards[shardName];
+ assert(shardStats.indexDetails,
+ 'indexDetails missing from db.collection.stats(' + tojson(options) +
+ ').shards[' + shardName + '] result: ' + tojson(shardStats));
+ // Currently, indexDetails is only supported with WiredTiger.
+ if (isWiredTiger) {
+ assert.eq(1,
+ Object.keys(shardStats.indexDetails).length,
+ 'WiredTiger indexDetails must have exactly one entry');
+ assert(shardStats.indexDetails[indexName],
+ indexName + ' missing from WiredTiger indexDetails: ' +
+ tojson(shardStats.indexDetails));
+ assert.neq(0,
+ Object.keys(shardStats.indexDetails[indexName]).length,
+ indexName + ' exists in indexDetails but contains no information: ' +
+ tojson(shardStats.indexDetails));
+ }
}
}
- }
- // indexDetailsKey - show indexDetails results for this index key only.
- var indexKey = {a: 1};
- var indexName = getIndexName(indexKey);
- checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
+ // indexDetailsKey - show indexDetails results for this index key only.
+ var indexKey = {
+ a: 1
+ };
+ var indexName = getIndexName(indexKey);
+ checkIndexDetails({indexDetails: true, indexDetailsKey: indexKey}, indexName);
- // indexDetailsName - show indexDetails results for this index name only.
- checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
-}());
+ // indexDetailsName - show indexDetails results for this index name only.
+ checkIndexDetails({indexDetails: true, indexDetailsName: indexName}, indexName);
+ }());
-s.stop();
+ s.stop();
})();
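The scaling checks in stats.js deliberately avoid exact equality: as the test's own comment notes, floating point precision means the scaled figure may differ slightly from stat / scale, so statComp accepts a +/-2 slack. A small worked use of the helper (the byte values are illustrative, not from the test):

function statComp(stat, statScaled, scale) {
    // Exact match is fine; otherwise stat / scale must fall within +/-2 of the scaled value.
    if (stat == statScaled)
        return true;
    assert(((statScaled - 2) <= (stat / scale)) && ((stat / scale) <= (statScaled + 2)));
}
statComp(4096, 4, 1024);   // 4096 bytes reported as 4 when scaled by 1024
statComp(4100, 4, 1024);   // 4100 / 1024 ~= 4.004 is within the +/-2 slack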
diff --git a/jstests/sharding/tag_auto_split.js b/jstests/sharding/tag_auto_split.js
index 835ec3b1546..df2011e5425 100644
--- a/jstests/sharding/tag_auto_split.js
+++ b/jstests/sharding/tag_auto_split.js
@@ -1,72 +1,66 @@
// Test to make sure that tag ranges get split
(function() {
-var s = new ShardingTest({ name: "tag_auto_split",
- shards: 2,
- mongos: 1,
- other: { enableBalancer : true } });
+ var s = new ShardingTest(
+ {name: "tag_auto_split", shards: 2, mongos: 1, other: {enableBalancer: true}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1}});
-assert.eq( 1, s.config.chunks.count() );
+ assert.eq(1, s.config.chunks.count());
-sh.addShardTag( "shard0000" , "a" );
+ sh.addShardTag("shard0000", "a");
-sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" );
-sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" );
+ sh.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+ sh.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
-assert.soon( function() {
- return s.config.chunks.count() == 3;
-}, "things didn't get split", 1000 * 60 * 10, 1000 );
+ assert.soon(function() {
+ return s.config.chunks.count() == 3;
+ }, "things didn't get split", 1000 * 60 * 10, 1000);
-s.printShardingStatus();
+ s.printShardingStatus();
-s.stop();
+ s.stop();
-//test without full shard key on tags
-s = new ShardingTest({ name: "tag_auto_split2",
- shards: 2,
- mongos: 1,
- other: { enableBalancer : true } });
+ // test without full shard key on tags
+ s = new ShardingTest(
+ {name: "tag_auto_split2", shards: 2, mongos: 1, other: {enableBalancer: true}});
-db = s.getDB( "test" );
+ db = s.getDB("test");
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardcollection : "test.foo" , key : { _id : 1, a : 1 } } );
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+ s.adminCommand({shardcollection: "test.foo", key: {_id: 1, a: 1}});
-assert.eq( 1, s.config.chunks.count() );
+ assert.eq(1, s.config.chunks.count());
-sh.addShardTag( "shard0000" , "a" );
+ sh.addShardTag("shard0000", "a");
-sh.addTagRange( "test.foo" , { _id : 5 } , { _id : 10 } , "a" );
-sh.addTagRange( "test.foo" , { _id : 10 } , { _id : 15 } , "b" );
+ sh.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
+ sh.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
-assert.soon( function() {
- return s.config.chunks.count() == 3;
-}, "things didn't get split", 1000 * 60 * 10, 1000 );
+ assert.soon(function() {
+ return s.config.chunks.count() == 3;
+ }, "things didn't get split", 1000 * 60 * 10, 1000);
-s.config.chunks.find().forEach(
- function(chunk){
+ s.config.chunks.find().forEach(function(chunk) {
var numFields = 0;
- for ( var x in chunk.min ) {
+ for (var x in chunk.min) {
numFields++;
- assert( x == "_id" || x == "a", tojson(chunk) );
+ assert(x == "_id" || x == "a", tojson(chunk));
}
- assert.eq( 2, numFields,tojson(chunk) );
- }
-);
+ assert.eq(2, numFields, tojson(chunk));
+ });
-// check chunk mins correspond exactly to tag range boundaries, extended to match shard key
-assert.eq( 1, s.config.chunks.find( {min : {_id : 5 , a : MinKey} } ).count(),
- "bad chunk range boundary" );
-assert.eq( 1, s.config.chunks.find( {min : {_id : 10 , a : MinKey} } ).count(),
- "bad chunk range boundary" );
+ // check chunk mins correspond exactly to tag range boundaries, extended to match shard key
+ assert.eq(
+ 1, s.config.chunks.find({min: {_id: 5, a: MinKey}}).count(), "bad chunk range boundary");
+ assert.eq(
+ 1, s.config.chunks.find({min: {_id: 10, a: MinKey}}).count(), "bad chunk range boundary");
-s.stop();
+ s.stop();
})();
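
As context for the assertions above, a minimal sketch of the tag-range split pattern, assuming the shell's global db points at the mongos of a ShardingTest `s` with the balancer enabled and test.foo already sharded on {_id: 1} as a single chunk:

    sh.addShardTag("shard0000", "a");
    sh.addTagRange("test.foo", {_id: 5}, {_id: 10}, "a");
    sh.addTagRange("test.foo", {_id: 10}, {_id: 15}, "b");
    // The balancer is expected to split the single initial chunk so that chunk
    // boundaries line up with the tagged ranges, leaving three chunks.
    assert.soon(function() {
        return s.config.chunks.count() == 3;
    }, "tag ranges were never split", 10 * 60 * 1000, 1000);
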
diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js
index 897433001e2..a07656422d3 100644
--- a/jstests/sharding/tag_range.js
+++ b/jstests/sharding/tag_range.js
@@ -1,82 +1,80 @@
// tests to make sure that tag ranges are added/removed/updated successfully
-function countTags( num, message ) {
- assert.eq( s.config.tags.count() , num , message );
+function countTags(num, message) {
+ assert.eq(s.config.tags.count(), num, message);
}
-var s = new ShardingTest({ name: "tag_range",
- shards: 2,
- mongos: 1 });
+var s = new ShardingTest({name: "tag_range", shards: 2, mongos: 1});
// this set up is not required but prevents warnings in the remove
-db = s.getDB( "tag_range" );
+db = s.getDB("tag_range");
-s.adminCommand( { enableSharding : "test" } );
+s.adminCommand({enableSharding: "test"});
s.ensurePrimaryShard('test', 'shard0001');
-s.adminCommand( { shardCollection : "test.tag_range" , key : { _id : 1 } } );
+s.adminCommand({shardCollection: "test.tag_range", key: {_id: 1}});
-assert.eq( 1 , s.config.chunks.count() );
+assert.eq(1, s.config.chunks.count());
-sh.addShardTag( "shard0000" , "a" );
+sh.addShardTag("shard0000", "a");
// add two ranges, verify the additions
-sh.addTagRange( "test.tag_range" , { _id : 5 } , { _id : 10 } , "a" );
-sh.addTagRange( "test.tag_range" , { _id : 10 } , { _id : 15 } , "b" );
+sh.addTagRange("test.tag_range", {_id: 5}, {_id: 10}, "a");
+sh.addTagRange("test.tag_range", {_id: 10}, {_id: 15}, "b");
-countTags( 2 , "tag ranges were not successfully added" );
+countTags(2, "tag ranges were not successfully added");
// remove the second range, should be left with one
-sh.removeTagRange( "test.tag_range" , { _id : 10 } , { _id : 15 } , "b" );
+sh.removeTagRange("test.tag_range", {_id: 10}, {_id: 15}, "b");
-countTags( 1 , "tag range not removed successfully" );
+countTags(1, "tag range not removed successfully");
// the additions are actually updates, so you can alter a range's max
-sh.addTagRange( "test.tag_range" , { _id : 5 } , { _id : 11 } , "a" );
+sh.addTagRange("test.tag_range", {_id: 5}, {_id: 11}, "a");
-assert.eq( 11 , s.config.tags.findOne().max._id , "tag range not updated successfully" );
+assert.eq(11, s.config.tags.findOne().max._id, "tag range not updated successfully");
// add range min=max, verify the additions
try {
- sh.addTagRange( "test.tag_range" , { _id : 20 } , { _id : 20 } , "a" );
+ sh.addTagRange("test.tag_range", {_id: 20}, {_id: 20}, "a");
} catch (e) {
- countTags( 1 , "tag range should not have been added" );
+ countTags(1, "tag range should not have been added");
}
// removeTagRange tests for tag ranges that do not exist
// Bad namespace
-sh.removeTagRange("badns", { _id : 5 }, { _id : 11 }, "a");
-countTags(1 , "Bad namespace: tag range does not exist");
+sh.removeTagRange("badns", {_id: 5}, {_id: 11}, "a");
+countTags(1, "Bad namespace: tag range does not exist");
// Bad tag
-sh.removeTagRange("test.tag_range", { _id : 5 }, { _id : 11 }, "badtag");
-countTags(1 , "Bad tag: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, {_id: 11}, "badtag");
+countTags(1, "Bad tag: tag range does not exist");
// Bad min
-sh.removeTagRange("test.tag_range", { _id : 0 }, { _id : 11 }, "a");
-countTags(1 , "Bad min: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 0}, {_id: 11}, "a");
+countTags(1, "Bad min: tag range does not exist");
// Bad max
-sh.removeTagRange("test.tag_range", { _id : 5 }, { _id : 12 }, "a");
-countTags(1 , "Bad max: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, {_id: 12}, "a");
+countTags(1, "Bad max: tag range does not exist");
// Invalid namespace
-sh.removeTagRange(35, { _id : 5 }, { _id : 11 }, "a");
-countTags(1 , "Invalid namespace: tag range does not exist");
+sh.removeTagRange(35, {_id: 5}, {_id: 11}, "a");
+countTags(1, "Invalid namespace: tag range does not exist");
// Invalid tag
-sh.removeTagRange("test.tag_range", { _id : 5 }, { _id : 11 }, 35);
-countTags(1 , "Invalid tag: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, {_id: 11}, 35);
+countTags(1, "Invalid tag: tag range does not exist");
// Invalid min
-sh.removeTagRange("test.tag_range", 35, { _id : 11 }, "a");
-countTags(1 , "Invalid min: tag range does not exist");
+sh.removeTagRange("test.tag_range", 35, {_id: 11}, "a");
+countTags(1, "Invalid min: tag range does not exist");
// Invalid max
-sh.removeTagRange("test.tag_range", { _id : 5 }, 35, "a");
-countTags(1 , "Invalid max: tag range does not exist");
+sh.removeTagRange("test.tag_range", {_id: 5}, 35, "a");
+countTags(1, "Invalid max: tag range does not exist");
s.stop();
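
Because addTagRange behaves like an update of the stored range (which is why re-adding {_id: 5} with a new max altered the existing document rather than adding a second one), the resulting state can be checked directly in config.tags. A small sketch, assuming a mongos shell connection:

    var configDB = db.getSiblingDB("config");
    printjson(configDB.tags.find({ns: "test.tag_range"}).toArray());
    // After the sequence above, a single range tagged "a" with max {_id: 11} should remain.
    assert.eq(1, configDB.tags.count({ns: "test.tag_range", tag: "a"}));
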
diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js
index 93f9862e756..523f5de1a0c 100644
--- a/jstests/sharding/test_stacked_migration_cleanup.js
+++ b/jstests/sharding/test_stacked_migration_cleanup.js
@@ -1,68 +1,69 @@
-// Tests "stacking" multiple migration cleanup threads and their behavior when the collection changes
+// Tests "stacking" multiple migration cleanup threads and their behavior when the collection
+// changes
(function() {
-'use strict';
+ 'use strict';
-// start up a new sharded cluster
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ // start up a new sharded cluster
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s;
-var admin = mongos.getDB("admin");
-var shards = mongos.getDB("config").shards.find().toArray();
-var coll = mongos.getCollection("foo.bar");
+ var mongos = st.s;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getDB("config").shards.find().toArray();
+ var coll = mongos.getCollection("foo.bar");
-// Enable sharding of the collection
-assert.commandWorked(mongos.adminCommand({ enablesharding : coll.getDB() + "" }));
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(mongos.adminCommand({ shardcollection : coll + "", key: { _id : 1 } }));
+ // Enable sharding of the collection
+ assert.commandWorked(mongos.adminCommand({enablesharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(mongos.adminCommand({shardcollection: coll + "", key: {_id: 1}}));
-var numChunks = 30;
+ var numChunks = 30;
-// Create a bunch of chunks
-for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(mongos.adminCommand({ split : coll + "", middle : { _id : i } }));
-}
+ // Create a bunch of chunks
+ for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(mongos.adminCommand({split: coll + "", middle: {_id: i}}));
+ }
-jsTest.log("Inserting a lot of small documents...");
+ jsTest.log("Inserting a lot of small documents...");
-// Insert a lot of small documents to make multiple cursor batches
-var bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < 10 * 1000; i++) {
- bulk.insert({ _id : i });
-}
-assert.writeOK(bulk.execute());
+ // Insert a lot of small documents to make multiple cursor batches
+ var bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < 10 * 1000; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-jsTest.log("Opening a mongod cursor...");
+ jsTest.log("Opening a mongod cursor...");
-// Open a new cursor on the mongod
-var cursor = coll.find();
-var next = cursor.next();
+ // Open a new cursor on the mongod
+ var cursor = coll.find();
+ var next = cursor.next();
-jsTest.log("Moving a bunch of chunks to stack cleanup...");
+ jsTest.log("Moving a bunch of chunks to stack cleanup...");
-// Move a bunch of chunks, but don't close the cursor so they stack.
-for (var i = 0; i < numChunks; i++) {
- assert.commandWorked(
- mongos.adminCommand({ moveChunk : coll + "", find : { _id : i }, to : shards[1]._id }));
-}
+ // Move a bunch of chunks, but don't close the cursor so they stack.
+ for (var i = 0; i < numChunks; i++) {
+ assert.commandWorked(
+ mongos.adminCommand({moveChunk: coll + "", find: {_id: i}, to: shards[1]._id}));
+ }
-jsTest.log("Dropping and re-creating collection...");
+ jsTest.log("Dropping and re-creating collection...");
-coll.drop();
+ coll.drop();
-bulk = coll.initializeUnorderedBulkOp();
-for (var i = 0; i < numChunks; i++) {
- bulk.insert({ _id : i });
-}
-assert.writeOK(bulk.execute());
+ bulk = coll.initializeUnorderedBulkOp();
+ for (var i = 0; i < numChunks; i++) {
+ bulk.insert({_id: i});
+ }
+ assert.writeOK(bulk.execute());
-sleep(10 * 1000);
+ sleep(10 * 1000);
-jsTest.log("Checking that documents were not cleaned up...");
+ jsTest.log("Checking that documents were not cleaned up...");
-for (var i = 0; i < numChunks; i++) {
- assert.neq(null, coll.findOne({ _id : i }));
-}
+ for (var i = 0; i < numChunks; i++) {
+ assert.neq(null, coll.findOne({_id: i}));
+ }
-st.stop();
+ st.stop();
})();
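
A condensed sketch of the cursor-pinning idiom this test relies on, assuming a sharded collection handle `coll` that already holds many small documents; the batch size is illustrative and the cleanup behavior is described as the test expects it, not guaranteed here:

    // A small batch keeps the cursor registered on the donor shard after next().
    var cursor = coll.find().batchSize(2);
    cursor.next();
    // moveChunk commands issued while the cursor is open leave their range-deletion
    // work queued ("stacked") behind it instead of deleting migrated documents immediately.
    cursor.close();  // closing (or exhausting) the cursor allows the deferred cleanup to run
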
diff --git a/jstests/sharding/top_chunk_autosplit.js b/jstests/sharding/top_chunk_autosplit.js
index 696bc3478ae..74a3e942cae 100644
--- a/jstests/sharding/top_chunk_autosplit.js
+++ b/jstests/sharding/top_chunk_autosplit.js
@@ -6,7 +6,7 @@ function shardSetup(shardConfig, dbName, collName) {
// Disable the balancer to not interfere with the test, but keep the balancer settings on
// (with default empty document) so the auto split logic will be able to move chunks around.
- assert.writeOK(st.s.getDB('config').settings.remove({ _id: 'balancer' }));
+ assert.writeOK(st.s.getDB('config').settings.remove({_id: 'balancer'}));
db.adminCommand({configureFailPoint: 'skipBalanceRound', mode: 'alwaysOn'});
return st;
}
@@ -31,13 +31,13 @@ function runTest(test) {
for (var i = 0; i < test.shards.length; i++) {
var startRange = test.shards[i].range.min;
var endRange = test.shards[i].range.max;
- var chunkSize = Math.abs(endRange-startRange)/test.shards[i].chunks;
+ var chunkSize = Math.abs(endRange - startRange) / test.shards[i].chunks;
for (var j = startRange; j < endRange; j += chunkSize) {
// No split on highest chunk
if (j + chunkSize >= MAXVAL) {
continue;
}
- db.adminCommand({split: coll + "", middle: {x: j+chunkSize}});
+ db.adminCommand({split: coll + "", middle: {x: j + chunkSize}});
db.adminCommand({moveChunk: coll + "", find: {x: j}, to: test.shards[i].name});
}
// Make sure to move chunk when there's only 1 chunk in shard
@@ -72,7 +72,10 @@ function runTest(test) {
// Insert one doc at a time until first auto-split occurs on top chunk
var xval = test.inserts.value;
do {
- var doc = {x: xval, val: largeStr};
+ var doc = {
+ x: xval,
+ val: largeStr
+ };
coll.insert(doc);
xval += test.inserts.inc;
} while (getNumberOfChunks(configDB) <= numChunks);
@@ -105,17 +108,44 @@ var configDB = st.s.getDB('config');
// Define shard key ranges for each of the shard nodes
var MINVAL = -500;
var MAXVAL = 1500;
-var lowChunkRange = {min: MINVAL, max: 0};
-var midChunkRange1 = {min: 0, max: 500};
-var midChunkRange2 = {min: 500, max: 1000};
-var highChunkRange = {min: 1000, max: MAXVAL};
-
-var lowChunkTagRange = {min: MinKey, max: 0};
-var highChunkTagRange = {min: 1000, max: MaxKey};
-
-var lowChunkInserts = {value: 0, inc: -1};
-var midChunkInserts = {value: 1, inc: 1};
-var highChunkInserts = {value: 1000, inc: 1};
+var lowChunkRange = {
+ min: MINVAL,
+ max: 0
+};
+var midChunkRange1 = {
+ min: 0,
+ max: 500
+};
+var midChunkRange2 = {
+ min: 500,
+ max: 1000
+};
+var highChunkRange = {
+ min: 1000,
+ max: MAXVAL
+};
+
+var lowChunkTagRange = {
+ min: MinKey,
+ max: 0
+};
+var highChunkTagRange = {
+ min: 1000,
+ max: MaxKey
+};
+
+var lowChunkInserts = {
+ value: 0,
+ inc: -1
+};
+var midChunkInserts = {
+ value: 1,
+ inc: 1
+};
+var highChunkInserts = {
+ value: 1000,
+ inc: 1
+};
var lowChunk = 1;
var highChunk = -1;
@@ -137,99 +167,119 @@ var highChunk = -1;
// high - high shard key value
var tests = [
{
- // Test auto-split on the "low" top chunk to another tagged shard
- name: "low top chunk with tag move",
- lowOrHigh: lowChunk,
- movedToShard: "shard0002",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another tagged shard
+ name: "low top chunk with tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0002",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "low" top chunk to same tagged shard
- name: "low top chunk with tag no move",
- lowOrHigh: lowChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
- ],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: lowChunkInserts
- },
+ // Test auto-split on the "low" top chunk to same tagged shard
+ name: "low top chunk with tag no move",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]},
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: lowChunkInserts
+ },
{
- // Test auto-split on the "low" top chunk to another shard
- name: "low top chunk no tag move",
- lowOrHigh: lowChunk,
- movedToShard: "shard0003",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20},
- {name: "shard0001", range: midChunkRange1, chunks: 20},
- {name: "shard0002", range: highChunkRange, chunks: 5},
- {name: "shard0003", range: midChunkRange2, chunks: 1}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk to another shard
+ name: "low top chunk no tag move",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0003",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20},
+ {name: "shard0001", range: midChunkRange1, chunks: 20},
+ {name: "shard0002", range: highChunkRange, chunks: 5},
+ {name: "shard0003", range: midChunkRange2, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another tagged shard
- name: "high top chunk with tag move",
- lowOrHigh: highChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another tagged shard
+ name: "high top chunk with tag move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to another shard
- name: "high top chunk no tag move",
- lowOrHigh: highChunk,
- movedToShard: "shard0003",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 5},
- {name: "shard0001", range: midChunkRange1, chunks: 20},
- {name: "shard0002", range: highChunkRange, chunks: 20},
- {name: "shard0003", range: midChunkRange2, chunks: 1}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to another shard
+ name: "high top chunk no tag move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0003",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 5},
+ {name: "shard0001", range: midChunkRange1, chunks: 20},
+ {name: "shard0002", range: highChunkRange, chunks: 20},
+ {name: "shard0003", range: midChunkRange2, chunks: 1}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same tagged shard
- name: "high top chunk with tag no move",
- lowOrHigh: highChunk,
- movedToShard: "shard0002",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
- {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
- {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
- {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}],
- tagRanges: [{range: lowChunkTagRange, tag: "NYC"},
- {range: highChunkTagRange, tag: "NYC"},
- {range: midChunkRange1, tag: "SF"},
- {range: midChunkRange2, tag: "SF"}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same tagged shard
+ name: "high top chunk with tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0002",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20, tags: ["NYC"]},
+ {name: "shard0001", range: midChunkRange1, chunks: 20, tags: ["SF"]},
+ {name: "shard0002", range: highChunkRange, chunks: 5, tags: ["NYC"]},
+ {name: "shard0003", range: midChunkRange2, chunks: 1, tags: ["SF"]}
+ ],
+ tagRanges: [
+ {range: lowChunkTagRange, tag: "NYC"},
+ {range: highChunkTagRange, tag: "NYC"},
+ {range: midChunkRange1, tag: "SF"},
+ {range: midChunkRange2, tag: "SF"}
+ ],
+ inserts: highChunkInserts
},
{
- // Test auto-split on the "high" top chunk to same shard
- name: "high top chunk no tag no move",
- lowOrHigh: highChunk,
- movedToShard: "shard0002",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 20},
- {name: "shard0001", range: midChunkRange1, chunks: 20},
- {name: "shard0002", range: highChunkRange, chunks: 1},
- {name: "shard0003", range: midChunkRange2, chunks: 5}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk to same shard
+ name: "high top chunk no tag no move",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0002",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 20},
+ {name: "shard0001", range: midChunkRange1, chunks: 20},
+ {name: "shard0002", range: highChunkRange, chunks: 1},
+ {name: "shard0003", range: midChunkRange2, chunks: 5}
+ ],
+ inserts: highChunkInserts
}
];
@@ -251,20 +301,20 @@ configDB = st.s.getDB('config');
var singleNodeTests = [
{
- // Test auto-split on the "low" top chunk on single node shard
- name: "single node shard - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 2}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk on single node shard
+ name: "single node shard - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0000",
+ shards: [{name: "shard0000", range: lowChunkRange, chunks: 2}],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk on single node shard
- name: "single node shard - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: highChunkRange, chunks: 2}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk on single node shard
+ name: "single node shard - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0000",
+ shards: [{name: "shard0000", range: highChunkRange, chunks: 2}],
+ inserts: highChunkInserts
}
];
@@ -280,9 +330,8 @@ st.stop();
// maxSize test
// To set maxSize, must manually add the shards
-st = shardSetup({name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}},
- dbName,
- collName);
+st = shardSetup(
+ {name: "maxSize", shards: 2, chunkSize: 1, other: {manualAddShard: true}}, dbName, collName);
db = st.getDB(dbName);
coll = db[collName];
configDB = st.s.getDB('config');
@@ -293,32 +342,35 @@ st.adminCommand({addshard: st.getConnNames()[1], maxSize: 1});
var maxSizeTests = [
{
- // Test auto-split on the "low" top chunk with maxSize on
- // destination shard
- name: "maxSize - low top chunk",
- lowOrHigh: lowChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: lowChunkRange, chunks: 10},
- {name: "shard0001", range: highChunkRange, chunks: 1}],
- inserts: lowChunkInserts
+ // Test auto-split on the "low" top chunk with maxSize on
+ // destination shard
+ name: "maxSize - low top chunk",
+ lowOrHigh: lowChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: lowChunkRange, chunks: 10},
+ {name: "shard0001", range: highChunkRange, chunks: 1}
+ ],
+ inserts: lowChunkInserts
},
{
- // Test auto-split on the "high" top chunk with maxSize on
- // destination shard
- name: "maxSize - high top chunk",
- lowOrHigh: highChunk,
- movedToShard: "shard0000",
- shards: [{name: "shard0000", range: highChunkRange, chunks: 10},
- {name: "shard0001", range: lowChunkRange, chunks: 1}],
- inserts: highChunkInserts
+ // Test auto-split on the "high" top chunk with maxSize on
+ // destination shard
+ name: "maxSize - high top chunk",
+ lowOrHigh: highChunk,
+ movedToShard: "shard0000",
+ shards: [
+ {name: "shard0000", range: highChunkRange, chunks: 10},
+ {name: "shard0001", range: lowChunkRange, chunks: 1}
+ ],
+ inserts: highChunkInserts
}
];
// SERVER-17070 Auto split moves to shard node running WiredTiger, if exceeding maxSize
var unsupported = ["wiredTiger", "rocksdb", "inMemory", "ephemeralForTest"];
-if (unsupported.indexOf(st.d0.adminCommand({serverStatus : 1}).storageEngine.name) == -1 &&
- unsupported.indexOf(st.d1.adminCommand({serverStatus : 1}).storageEngine.name) == -1) {
-
+if (unsupported.indexOf(st.d0.adminCommand({serverStatus: 1}).storageEngine.name) == -1 &&
+ unsupported.indexOf(st.d1.adminCommand({serverStatus: 1}).storageEngine.name) == -1) {
assert.commandWorked(db.adminCommand({enableSharding: dbName}));
db.adminCommand({movePrimary: dbName, to: 'shard0000'});
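
The maxSize portion of the test depends on adding the shards by hand so that a 1MB cap can be attached to one of them; a hedged sketch of that setup, assuming a ShardingTest `st` started with other: {manualAddShard: true}:

    var names = st.getConnNames();
    assert.commandWorked(st.s.adminCommand({addshard: names[0]}));              // uncapped shard
    assert.commandWorked(st.s.adminCommand({addshard: names[1], maxSize: 1}));  // capped at 1MB
    printjson(st.s.getDB("config").shards.find().toArray());  // maxSize appears in the shard document
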
diff --git a/jstests/sharding/trace_missing_docs_test.js b/jstests/sharding/trace_missing_docs_test.js
index d7ac493cc5a..f02ccc80434 100644
--- a/jstests/sharding/trace_missing_docs_test.js
+++ b/jstests/sharding/trace_missing_docs_test.js
@@ -2,48 +2,48 @@
load('jstests/libs/trace_missing_docs.js');
(function() {
-'use strict';
+ 'use strict';
-var testDocMissing = function(useReplicaSet) {
- var options = { rs: useReplicaSet,
- shardOptions: { master: "", oplogSize: 10 },
- rsOptions: { nodes: 1, oplogSize: 10 } };
+ var testDocMissing = function(useReplicaSet) {
+ var options = {
+ rs: useReplicaSet,
+ shardOptions: {master: "", oplogSize: 10},
+ rsOptions: {nodes: 1, oplogSize: 10}
+ };
- var st = new ShardingTest({ shards: 2, mongos: 1, other: options });
+ var st = new ShardingTest({shards: 2, mongos: 1, other: options});
- var mongos = st.s0;
- var coll = mongos.getCollection("foo.bar");
- var admin = mongos.getDB("admin");
- var shards = mongos.getCollection("config.shards").find().toArray();
+ var mongos = st.s0;
+ var coll = mongos.getCollection("foo.bar");
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
- assert.commandWorked(admin.runCommand({ enableSharding: coll.getDB() + "" }));
- st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
- coll.ensureIndex({ sk: 1 });
- assert.commandWorked(admin.runCommand({ shardCollection: coll + "", key: { sk: 1 } }));
+ coll.ensureIndex({sk: 1});
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {sk: 1}}));
- assert.writeOK(coll.insert({ _id: 12345, sk: 67890, hello: "world" }));
- assert.writeOK(coll.update({ _id: 12345 }, { $set: { baz: 'biz' } }));
- assert.writeOK(coll.update({ sk: 67890 }, { $set: { baz: 'boz' } }));
+ assert.writeOK(coll.insert({_id: 12345, sk: 67890, hello: "world"}));
+ assert.writeOK(coll.update({_id: 12345}, {$set: {baz: 'biz'}}));
+ assert.writeOK(coll.update({sk: 67890}, {$set: {baz: 'boz'}}));
- assert.commandWorked(admin.runCommand({ moveChunk: coll + "",
- find: { sk: 0 },
- to: shards[1]._id,
- _waitForDelete: true }));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {sk: 0}, to: shards[1]._id, _waitForDelete: true}));
- st.printShardingStatus();
+ st.printShardingStatus();
- var ops = traceMissingDoc(coll, { _id: 12345, sk: 67890 });
+ var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
- assert.eq(ops[0].op, 'i');
- assert.eq(ops.length, 5);
+ assert.eq(ops[0].op, 'i');
+ assert.eq(ops.length, 5);
- jsTest.log("DONE! " + (useReplicaSet ? "(using rs)": "(using master/slave)"));
+ jsTest.log("DONE! " + (useReplicaSet ? "(using rs)" : "(using master/slave)"));
- st.stop();
-};
+ st.stop();
+ };
-testDocMissing(true);
-testDocMissing(false);
+ testDocMissing(true);
+ testDocMissing(false);
})();
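
For completeness, the helper this test drives can be invoked directly; a minimal usage sketch, assuming the shell is launched from the repository root (so the load() path resolves) and `coll` is a sharded collection handle:

    load('jstests/libs/trace_missing_docs.js');
    // The probe document must carry both _id and the shard key so the helper can
    // follow it through the relevant oplogs.
    var ops = traceMissingDoc(coll, {_id: 12345, sk: 67890});
    assert.eq('i', ops[0].op, 'expected the first traced oplog entry to be the insert');
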
diff --git a/jstests/sharding/unowned_doc_filtering.js b/jstests/sharding/unowned_doc_filtering.js
index e54cb21f3c7..2c12ef4b0cf 100644
--- a/jstests/sharding/unowned_doc_filtering.js
+++ b/jstests/sharding/unowned_doc_filtering.js
@@ -7,43 +7,44 @@
* @tags: [requires_persistence]
*/
(function() {
-"use strict";
+ "use strict";
-var st = new ShardingTest({ shards: 2 });
+ var st = new ShardingTest({shards: 2});
-var testDB = st.s.getDB('test');
+ var testDB = st.s.getDB('test');
-assert.commandWorked(testDB.adminCommand({ enableSharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0000');
-assert.commandWorked(testDB.adminCommand({ shardCollection: 'test.foo', key: { x: 1 }}));
+ assert.commandWorked(testDB.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0000');
+ assert.commandWorked(testDB.adminCommand({shardCollection: 'test.foo', key: {x: 1}}));
-var inserts = [];
-for (var i = 0; i < 100; i++) {
- inserts.push({x:i});
-}
-assert.writeOK(testDB.foo.insert(inserts));
-
-assert.commandWorked(testDB.adminCommand({split:'test.foo', find: {x:50}}));
-assert.commandWorked(testDB.adminCommand({moveChunk:'test.foo', find:{x:100}, to: 'shard0001'}));
-
-// Insert some documents directly into the shards into chunks not owned by that shard.
-st.d0.getDB('test').foo.insert({x:100});
-st.d1.getDB('test').foo.insert({x:0});
-
-st.restartMongod(0);
-st.restartMongod(1);
-
-var fooCount;
-for (var retries = 0; retries <= 2; retries++) {
- try {
- fooCount = testDB.foo.find().itcount();
- break;
- } catch (e) {
- // expected for reestablishing connections broken by the mongod restart.
- assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
+ var inserts = [];
+ for (var i = 0; i < 100; i++) {
+ inserts.push({x: i});
+ }
+ assert.writeOK(testDB.foo.insert(inserts));
+
+ assert.commandWorked(testDB.adminCommand({split: 'test.foo', find: {x: 50}}));
+ assert.commandWorked(
+ testDB.adminCommand({moveChunk: 'test.foo', find: {x: 100}, to: 'shard0001'}));
+
+ // Insert some documents directly into the shards into chunks not owned by that shard.
+ st.d0.getDB('test').foo.insert({x: 100});
+ st.d1.getDB('test').foo.insert({x: 0});
+
+ st.restartMongod(0);
+ st.restartMongod(1);
+
+ var fooCount;
+ for (var retries = 0; retries <= 2; retries++) {
+ try {
+ fooCount = testDB.foo.find().itcount();
+ break;
+ } catch (e) {
+ // expected for reestablishing connections broken by the mongod restart.
+ assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
+ }
}
-}
-assert.eq(100, fooCount);
+ assert.eq(100, fooCount);
-st.stop();
+ st.stop();
}());
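
A hedged sketch of the reconnect-and-retry idiom above, assuming a mongos database handle `testDB` whose shards have just been restarted, so the first read may fail with HostUnreachable while connections are re-established:

    function itcountWithRetry(coll, maxRetries) {
        for (var attempt = 0; attempt <= maxRetries; attempt++) {
            try {
                return coll.find().itcount();
            } catch (e) {
                // Only connection failures caused by the restart are tolerated.
                assert.eq(ErrorCodes.HostUnreachable, e.code, tojson(e));
            }
        }
        throw new Error("shards still unreachable after " + maxRetries + " retries");
    }
    assert.eq(100, itcountWithRetry(testDB.foo, 2));
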
diff --git a/jstests/sharding/update_immutable_fields.js b/jstests/sharding/update_immutable_fields.js
index f0383fcf2f8..ba936808fdd 100644
--- a/jstests/sharding/update_immutable_fields.js
+++ b/jstests/sharding/update_immutable_fields.js
@@ -1,77 +1,79 @@
// Tests that updates can't change immutable fields (used in sharded system)
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards : 2, mongos : 1 });
+ var st = new ShardingTest({shards: 2, mongos: 1});
-var mongos = st.s;
-var config = mongos.getDB("config");
-var coll = mongos.getCollection(jsTestName() + ".coll1");
-var shard0 = st.shard0;
+ var mongos = st.s;
+ var config = mongos.getDB("config");
+ var coll = mongos.getCollection(jsTestName() + ".coll1");
+ var shard0 = st.shard0;
-assert.commandWorked(config.adminCommand({enableSharding : coll.getDB() + ""}));
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000');
-assert.commandWorked(config.adminCommand({shardCollection : "" + coll, key : {a : 1}}));
+ assert.commandWorked(config.adminCommand({enableSharding: coll.getDB() + ""}));
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0000');
+ assert.commandWorked(config.adminCommand({shardCollection: "" + coll, key: {a: 1}}));
-var getDirectShardedConn = function( st, collName ) {
+ var getDirectShardedConn = function(st, collName) {
- var shardConnWithVersion = new Mongo( st.shard0.host );
+ var shardConnWithVersion = new Mongo(st.shard0.host);
- var configConnStr = st._configDB;
+ var configConnStr = st._configDB;
- var maxChunk = st.s0.getCollection( "config.chunks" )
- .find({ ns : collName }).sort({ lastmod : -1 }).next();
+ var maxChunk =
+ st.s0.getCollection("config.chunks").find({ns: collName}).sort({lastmod: -1}).next();
- var ssvInitCmd = { setShardVersion : collName,
- authoritative : true,
- configdb : configConnStr,
- version : maxChunk.lastmod,
- shard: 'shard0000',
- versionEpoch : maxChunk.lastmodEpoch };
+ var ssvInitCmd = {
+ setShardVersion: collName,
+ authoritative: true,
+ configdb: configConnStr,
+ version: maxChunk.lastmod,
+ shard: 'shard0000',
+ versionEpoch: maxChunk.lastmodEpoch
+ };
- printjson(ssvInitCmd);
- assert.commandWorked( shardConnWithVersion.getDB( "admin" ).runCommand( ssvInitCmd ) );
+ printjson(ssvInitCmd);
+ assert.commandWorked(shardConnWithVersion.getDB("admin").runCommand(ssvInitCmd));
- return shardConnWithVersion;
-};
+ return shardConnWithVersion;
+ };
-var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
+ var shard0Coll = getDirectShardedConn(st, coll.getFullName()).getCollection(coll.getFullName());
-// No shard key
-shard0Coll.remove({});
-assert.writeError(shard0Coll.save({ _id: 3 }));
+ // No shard key
+ shard0Coll.remove({});
+ assert.writeError(shard0Coll.save({_id: 3}));
-// Full shard key in save
-assert.writeOK(shard0Coll.save({ _id: 1, a: 1 }));
+ // Full shard key in save
+ assert.writeOK(shard0Coll.save({_id: 1, a: 1}));
-// Full shard key on replacement (basically the same as above)
-shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }, true));
+ // Full shard key on replacement (basically the same as above)
+ shard0Coll.remove({});
+ assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}, true));
-// Full shard key after $set
-shard0Coll.remove({});
-assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}, true));
+ // Full shard key after $set
+ shard0Coll.remove({});
+ assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));
-// Update existing doc (replacement), same shard key value
-assert.writeOK(shard0Coll.update({ _id: 1 }, { a: 1 }));
+ // Update existing doc (replacement), same shard key value
+ assert.writeOK(shard0Coll.update({_id: 1}, {a: 1}));
-//Update existing doc ($set), same shard key value
-assert.writeOK(shard0Coll.update({ _id: 1 }, { $set: { a: 1 }}));
+ // Update existing doc ($set), same shard key value
+ assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}));
-// Error due to mutating the shard key (replacement)
-assert.writeError(shard0Coll.update({ _id: 1 }, { b: 1 }));
+ // Error due to mutating the shard key (replacement)
+ assert.writeError(shard0Coll.update({_id: 1}, {b: 1}));
-// Error due to mutating the shard key ($set)
-assert.writeError(shard0Coll.update({ _id: 1 }, { $unset: { a: 1 }}));
+ // Error due to mutating the shard key ($set)
+ assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));
-// Error due to removing all the embedded fields.
-shard0Coll.remove({});
+ // Error due to removing all the embedded fields.
+ shard0Coll.remove({});
-assert.writeOK(shard0Coll.save({ _id: 2, a: { c: 1, b: 1 }}));
+ assert.writeOK(shard0Coll.save({_id: 2, a: {c: 1, b: 1}}));
-assert.writeError(shard0Coll.update({}, { $unset: { "a.c": 1 }}));
-assert.writeError(shard0Coll.update({}, { $unset: { "a.b": 1, "a.c": 1 }}));
+ assert.writeError(shard0Coll.update({}, {$unset: {"a.c": 1}}));
+ assert.writeError(shard0Coll.update({}, {$unset: {"a.b": 1, "a.c": 1}}));
-st.stop();
+ st.stop();
})();
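
Distilled from the cases above, a short sketch of the rules the shard enforces, assuming `shard0Coll` is the versioned direct-shard handle returned by getDirectShardedConn() and the collection is sharded on {a: 1}:

    shard0Coll.remove({});
    assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}, true));  // upsert carrying the full shard key
    assert.writeOK(shard0Coll.update({_id: 1}, {$set: {a: 1}}));        // re-setting the same value is allowed
    assert.writeError(shard0Coll.update({_id: 1}, {$unset: {a: 1}}));   // removing the shard key is rejected
    assert.writeError(shard0Coll.update({_id: 1}, {b: 1}));             // a replacement that drops it is rejected
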
diff --git a/jstests/sharding/update_sharded.js b/jstests/sharding/update_sharded.js
index e76521f2377..42a2954107e 100644
--- a/jstests/sharding/update_sharded.js
+++ b/jstests/sharding/update_sharded.js
@@ -2,103 +2,99 @@
// since shard key is immutable.
(function() {
-var s = new ShardingTest({ name: "auto1", shards: 2, mongos: 1 });
-
-s.adminCommand( { enablesharding : "test" } );
-s.ensurePrimaryShard('test', 'shard0001');
-
-// repeat same tests with hashed shard key, to ensure identical behavior
-s.adminCommand( { shardcollection : "test.update0" , key : { key : 1 } } );
-s.adminCommand( { shardcollection : "test.update1" , key : { key : "hashed" } } );
-
-db = s.getDB( "test" );
-
-for(i=0; i < 2; i++){
- coll = db.getCollection("update" + i);
-
- coll.insert({_id:1, key:1});
-
- // these are both upserts
- coll.save({_id:2, key:2});
- coll.update({_id:3, key:3}, {$set: {foo: 'bar'}}, {upsert: true});
-
- assert.eq(coll.count(), 3, "count A");
- assert.eq(coll.findOne({_id:3}).key, 3 , "findOne 3 key A");
- assert.eq(coll.findOne({_id:3}).foo, 'bar' , "findOne 3 foo A");
-
- // update existing using save()
- coll.save({_id:1, key:1, other:1});
-
- // update existing using update()
- coll.update({_id:2}, {key:2, other:2});
- coll.update({_id:3}, {key:3, other:3});
-
- // do a replacement-style update which queries the shard key and keeps it constant
- coll.save( {_id:4, key:4} );
- coll.update({key:4}, {key:4, other:4});
- assert.eq( coll.find({key:4, other:4}).count() , 1 , 'replacement update error');
- coll.remove( {_id:4} );
-
- assert.eq(coll.count(), 3, "count B");
- coll.find().forEach(function(x){
- assert.eq(x._id, x.key, "_id == key");
- assert.eq(x._id, x.other, "_id == other");
- });
-
- assert.writeError(coll.update({ _id: 1, key: 1 }, { $set: { key: 2 }}));
- assert.eq(coll.findOne({_id:1}).key, 1, 'key unchanged');
-
- assert.writeOK(coll.update({ _id: 1, key: 1 }, { $set: { foo: 2 }}));
-
- coll.update( { key : 17 } , { $inc : { x : 5 } } , true );
- assert.eq( 5 , coll.findOne( { key : 17 } ).x , "up1" );
-
- coll.update( { key : 18 } , { $inc : { x : 5 } } , true , true );
- assert.eq( 5 , coll.findOne( { key : 18 } ).x , "up2" );
-
- // Make sure we can extract exact _id from certain queries
- assert.writeOK(coll.update({_id : ObjectId()}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({_id : {$eq : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({_id : {$all : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$or : [{_id : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$and : [{_id : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
-
- // Invalid extraction of exact _id from query
- assert.writeError(coll.update({}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({_id : {$gt : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({_id : {$in : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({$or : [{_id : ObjectId()}, {_id : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({$and : [{_id : ObjectId()}, {_id : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({'_id.x' : ObjectId()}, {$set : {x : 1}}, {multi : false}));
-
- // Make sure we can extract exact shard key from certain queries
- assert.writeOK(coll.update({key : ObjectId()}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({key : {$eq : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({key : {$all : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$or : [{key : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({$and : [{key : ObjectId()}]}, {$set : {x : 1}}, {multi : false}));
-
- // Invalid extraction of exact key from query
- assert.writeError(coll.update({}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({key : {$gt : ObjectId()}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({key : {$in : [ObjectId()]}}, {$set : {x : 1}}, {multi : false}));
- assert.writeError(coll.update({$or : [{key : ObjectId()}, {key : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({$and : [{key : ObjectId()}, {key : ObjectId()}]},
- {$set : {x : 1}},
- {multi : false}));
- assert.writeError(coll.update({'key.x' : ObjectId()}, {$set : {x : 1}}, {multi : false}));
-
- // Make sure failed shard key or _id extraction doesn't affect the other
- assert.writeOK(coll.update({'_id.x' : ObjectId(), key : 1}, {$set : {x : 1}}, {multi : false}));
- assert.writeOK(coll.update({_id : ObjectId(), 'key.x' : 1}, {$set : {x : 1}}, {multi : false}));
-}
-
-s.stop();
+ var s = new ShardingTest({name: "auto1", shards: 2, mongos: 1});
+
+ s.adminCommand({enablesharding: "test"});
+ s.ensurePrimaryShard('test', 'shard0001');
+
+ // repeat same tests with hashed shard key, to ensure identical behavior
+ s.adminCommand({shardcollection: "test.update0", key: {key: 1}});
+ s.adminCommand({shardcollection: "test.update1", key: {key: "hashed"}});
+
+ db = s.getDB("test");
+
+ for (i = 0; i < 2; i++) {
+ coll = db.getCollection("update" + i);
+
+ coll.insert({_id: 1, key: 1});
+
+ // these are both upserts
+ coll.save({_id: 2, key: 2});
+ coll.update({_id: 3, key: 3}, {$set: {foo: 'bar'}}, {upsert: true});
+
+ assert.eq(coll.count(), 3, "count A");
+ assert.eq(coll.findOne({_id: 3}).key, 3, "findOne 3 key A");
+ assert.eq(coll.findOne({_id: 3}).foo, 'bar', "findOne 3 foo A");
+
+ // update existing using save()
+ coll.save({_id: 1, key: 1, other: 1});
+
+ // update existing using update()
+ coll.update({_id: 2}, {key: 2, other: 2});
+ coll.update({_id: 3}, {key: 3, other: 3});
+
+ // do a replacement-style update which queries the shard key and keeps it constant
+ coll.save({_id: 4, key: 4});
+ coll.update({key: 4}, {key: 4, other: 4});
+ assert.eq(coll.find({key: 4, other: 4}).count(), 1, 'replacement update error');
+ coll.remove({_id: 4});
+
+ assert.eq(coll.count(), 3, "count B");
+ coll.find().forEach(function(x) {
+ assert.eq(x._id, x.key, "_id == key");
+ assert.eq(x._id, x.other, "_id == other");
+ });
+
+ assert.writeError(coll.update({_id: 1, key: 1}, {$set: {key: 2}}));
+ assert.eq(coll.findOne({_id: 1}).key, 1, 'key unchanged');
+
+ assert.writeOK(coll.update({_id: 1, key: 1}, {$set: {foo: 2}}));
+
+ coll.update({key: 17}, {$inc: {x: 5}}, true);
+ assert.eq(5, coll.findOne({key: 17}).x, "up1");
+
+ coll.update({key: 18}, {$inc: {x: 5}}, true, true);
+ assert.eq(5, coll.findOne({key: 18}).x, "up2");
+
+ // Make sure we can extract exact _id from certain queries
+ assert.writeOK(coll.update({_id: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact _id from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({_id: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({_id: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$or: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$and: [{_id: ObjectId()}, {_id: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'_id.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure we can extract exact shard key from certain queries
+ assert.writeOK(coll.update({key: ObjectId()}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$eq: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({key: {$all: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$or: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({$and: [{key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+
+ // Invalid extraction of exact key from query
+ assert.writeError(coll.update({}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({key: {$gt: ObjectId()}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({key: {$in: [ObjectId()]}}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$or: [{key: ObjectId()}, {key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update(
+ {$and: [{key: ObjectId()}, {key: ObjectId()}]}, {$set: {x: 1}}, {multi: false}));
+ assert.writeError(coll.update({'key.x': ObjectId()}, {$set: {x: 1}}, {multi: false}));
+
+ // Make sure failed shard key or _id extraction doesn't affect the other
+ assert.writeOK(coll.update({'_id.x': ObjectId(), key: 1}, {$set: {x: 1}}, {multi: false}));
+ assert.writeOK(coll.update({_id: ObjectId(), 'key.x': 1}, {$set: {x: 1}}, {multi: false}));
+ }
+
+ s.stop();
})();
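
As a condensed reference for the single-document (multi: false) routing rules checked above, assuming `coll` is sharded on {key: 1}; the literal values are placeholders:

    assert.writeOK(coll.update({key: 7}, {$set: {x: 1}}, {multi: false}));            // exact equality routes
    assert.writeOK(coll.update({key: {$eq: 7}}, {$set: {x: 1}}, {multi: false}));     // $eq also counts as exact
    assert.writeError(coll.update({key: {$gt: 7}}, {$set: {x: 1}}, {multi: false}));  // ranges cannot be routed
    assert.writeError(coll.update({key: {$in: [7]}}, {$set: {x: 1}}, {multi: false}));  // neither can $in
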
diff --git a/jstests/sharding/upsert_sharded.js b/jstests/sharding/upsert_sharded.js
index 7a31c350ef1..bf880df027f 100644
--- a/jstests/sharding/upsert_sharded.js
+++ b/jstests/sharding/upsert_sharded.js
@@ -3,105 +3,100 @@
// NOTE: Generic upsert behavior tests belong in the core suite
//
(function() {
-'use strict';
-
-var st = new ShardingTest({ shards : 2, mongos : 1 });
-
-var mongos = st.s0;
-var admin = mongos.getDB( "admin" );
-var shards = mongos.getCollection( "config.shards" ).find().toArray();
-var coll = mongos.getCollection( "foo.bar" );
-
-assert( admin.runCommand({ enableSharding : coll.getDB() + "" }).ok );
-st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
-
-var upsertedResult = function(query, expr) {
- coll.remove({});
- return coll.update(query, expr, { upsert : true });
-};
-
-var upsertedField = function(query, expr, fieldName) {
- assert.writeOK(upsertedResult(query, expr));
- return coll.findOne()[fieldName];
-};
-
-var upsertedId = function(query, expr) {
- return upsertedField(query, expr, "_id");
-};
-
-var upsertedXVal = function(query, expr) {
- return upsertedField(query, expr, "x");
-};
-
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { x : 1 } }));
-assert.commandWorked(admin.runCommand({ split : coll + "", middle : { x : 0 } }));
-assert.commandWorked(admin.runCommand({ moveChunk : coll + "",
- find : { x : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-st.printShardingStatus();
-
-// upserted update replacement would result in no shard key
-assert.writeError(upsertedResult({ x : 1 }, {}));
-
-// updates with upsert must contain shard key in query when $op style
-assert.eq(1, upsertedXVal({ x : 1 }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ x : { $eq : 1 } }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ x : { $all : [1] } }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ $and : [{ x : { $eq : 1 } }] }, { $set : { a : 1 } }));
-assert.eq(1, upsertedXVal({ $or : [{ x : { $eq : 1 } }] }, { $set : { a : 1 } }));
-
-// shard key not extracted
-assert.writeError(upsertedResult({}, { $set : { a : 1, x : 1 } }));
-assert.writeError(upsertedResult({ x : { $gt : 1 } }, { $set : { a : 1, x : 1 } }));
-assert.writeError(upsertedResult({ x : { $in : [1] } }, { $set : { a : 1, x : 1 } }));
-
-// Shard key type errors
-assert.writeError(upsertedResult({ x : undefined }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : [1, 2] }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : { $eq : { $gt : 5 } } }, { $set : { a : 1 } }));
-// Regex shard key is not extracted from queries, even exact matches
-assert.writeError(upsertedResult({ x : { $eq : /abc/ } }, { $set : { a : 1 } }));
-
-// nested field extraction always fails with non-nested key - like _id, we require setting the
-// elements directly
-assert.writeError(upsertedResult({ "x.x" : 1 }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ "x.x" : { $eq : 1 } }, { $set : { a : 1 } }));
-
-coll.drop();
-
-st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
-assert.commandWorked(admin.runCommand({ shardCollection : coll + "", key : { 'x.x' : 1 } }));
-assert.commandWorked( admin.runCommand({ split : coll + "", middle : { 'x.x' : 0 } }));
-assert.commandWorked( admin.runCommand({ moveChunk : coll + "",
- find : { 'x.x' : 0 },
- to : shards[1]._id,
- _waitForDelete : true }));
-
-st.printShardingStatus();
-
-// nested field extraction with nested shard key
-assert.docEq({ x : 1 }, upsertedXVal({ "x.x" : 1 }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ "x.x" : { $eq : 1 } }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ "x.x" : { $all : [1] } }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ $and : [{ "x.x" : { $eq : 1 } }] }, { $set : { a : 1 } }));
-assert.docEq({ x : 1 }, upsertedXVal({ $or : [{ "x.x" : { $eq : 1 } }] }, { $set : { a : 1 } }));
-
-// Can specify siblings of nested shard keys
-assert.docEq({ x : 1, y : 1 }, upsertedXVal({ "x.x" : 1, "x.y" : 1 }, { $set : { a : 1 } }));
-assert.docEq({ x : 1, y : { z : 1 } },
- upsertedXVal({ "x.x" : 1, "x.y.z" : 1 }, { $set : { a : 1 } }));
-
-// No arrays at any level
-assert.writeError(upsertedResult({ "x.x" : [] }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : { x : [] } }, { $set : { a : 1 } }));
-assert.writeError(upsertedResult({ x : [{ x : 1 }] }, { $set : { a : 1 } }));
-
-// Can't set sub-fields of nested key
-assert.writeError(upsertedResult({ "x.x.x" : { $eq : 1 } }, { $set : { a : 1 } }));
-
-st.stop();
+ 'use strict';
+
+ var st = new ShardingTest({shards: 2, mongos: 1});
+
+ var mongos = st.s0;
+ var admin = mongos.getDB("admin");
+ var shards = mongos.getCollection("config.shards").find().toArray();
+ var coll = mongos.getCollection("foo.bar");
+
+ assert(admin.runCommand({enableSharding: coll.getDB() + ""}).ok);
+ st.ensurePrimaryShard(coll.getDB().getName(), 'shard0001');
+
+ var upsertedResult = function(query, expr) {
+ coll.remove({});
+ return coll.update(query, expr, {upsert: true});
+ };
+
+ var upsertedField = function(query, expr, fieldName) {
+ assert.writeOK(upsertedResult(query, expr));
+ return coll.findOne()[fieldName];
+ };
+
+ var upsertedId = function(query, expr) {
+ return upsertedField(query, expr, "_id");
+ };
+
+ var upsertedXVal = function(query, expr) {
+ return upsertedField(query, expr, "x");
+ };
+
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {x: 1}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {x: 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {x: 0}, to: shards[1]._id, _waitForDelete: true}));
+
+ st.printShardingStatus();
+
+ // upserted update replacement would result in no shard key
+ assert.writeError(upsertedResult({x: 1}, {}));
+
+ // updates with upsert must contain shard key in query when $op style
+ assert.eq(1, upsertedXVal({x: 1}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({x: {$eq: 1}}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({x: {$all: [1]}}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({$and: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+ assert.eq(1, upsertedXVal({$or: [{x: {$eq: 1}}]}, {$set: {a: 1}}));
+
+ // shard key not extracted
+ assert.writeError(upsertedResult({}, {$set: {a: 1, x: 1}}));
+ assert.writeError(upsertedResult({x: {$gt: 1}}, {$set: {a: 1, x: 1}}));
+ assert.writeError(upsertedResult({x: {$in: [1]}}, {$set: {a: 1, x: 1}}));
+
+ // Shard key type errors
+ assert.writeError(upsertedResult({x: undefined}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: [1, 2]}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: {$eq: {$gt: 5}}}, {$set: {a: 1}}));
+ // Regex shard key is not extracted from queries, even exact matches
+ assert.writeError(upsertedResult({x: {$eq: /abc/}}, {$set: {a: 1}}));
+
+ // nested field extraction always fails with non-nested key - like _id, we require setting the
+ // elements directly
+ assert.writeError(upsertedResult({"x.x": 1}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+ coll.drop();
+
+ st.ensurePrimaryShard(coll.getDB() + "", shards[0]._id);
+ assert.commandWorked(admin.runCommand({shardCollection: coll + "", key: {'x.x': 1}}));
+ assert.commandWorked(admin.runCommand({split: coll + "", middle: {'x.x': 0}}));
+ assert.commandWorked(admin.runCommand(
+ {moveChunk: coll + "", find: {'x.x': 0}, to: shards[1]._id, _waitForDelete: true}));
+
+ st.printShardingStatus();
+
+ // nested field extraction with nested shard key
+ assert.docEq({x: 1}, upsertedXVal({"x.x": 1}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({"x.x": {$eq: 1}}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({"x.x": {$all: [1]}}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({$and: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+ assert.docEq({x: 1}, upsertedXVal({$or: [{"x.x": {$eq: 1}}]}, {$set: {a: 1}}));
+
+ // Can specify siblings of nested shard keys
+ assert.docEq({x: 1, y: 1}, upsertedXVal({"x.x": 1, "x.y": 1}, {$set: {a: 1}}));
+ assert.docEq({x: 1, y: {z: 1}}, upsertedXVal({"x.x": 1, "x.y.z": 1}, {$set: {a: 1}}));
+
+ // No arrays at any level
+ assert.writeError(upsertedResult({"x.x": []}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: {x: []}}, {$set: {a: 1}}));
+ assert.writeError(upsertedResult({x: [{x: 1}]}, {$set: {a: 1}}));
+
+ // Can't set sub-fields of nested key
+ assert.writeError(upsertedResult({"x.x.x": {$eq: 1}}, {$set: {a: 1}}));
+
+ st.stop();
})();
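
The same routing idea applies to upserts; a brief sketch distilled from the assertions above, assuming `coll` is sharded on {x: 1} and emptied between statements:

    assert.writeOK(coll.update({x: 1}, {$set: {a: 1}}, {upsert: true}));         // exact shard key in the query
    assert.writeOK(coll.update({x: {$eq: 2}}, {$set: {a: 1}}, {upsert: true}));  // $eq is treated as exact
    assert.writeError(coll.update({x: {$gt: 3}}, {$set: {a: 1, x: 3}}, {upsert: true}));  // range query is rejected
    assert.writeError(coll.update({x: 4}, {}, {upsert: true}));  // replacement upsert that would drop the shard key
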
diff --git a/jstests/sharding/user_flags_sharded.js b/jstests/sharding/user_flags_sharded.js
index 1ceadd1b2fd..f629c3b759f 100644
--- a/jstests/sharding/user_flags_sharded.js
+++ b/jstests/sharding/user_flags_sharded.js
@@ -2,64 +2,63 @@
// then collection is sharded, flags get carried over.
(function() {
-if (jsTest.options().storageEngine === "mmapv1") {
- // the dbname and collection we'll be working with
- var dbname = "testDB";
- var coll = "userFlagsColl";
- var ns = dbname + "." + coll;
+ if (jsTest.options().storageEngine === "mmapv1") {
+ // the dbname and collection we'll be working with
+ var dbname = "testDB";
+ var coll = "userFlagsColl";
+ var ns = dbname + "." + coll;
- // First create fresh collection on a new standalone mongod
- var newShardConn = MongoRunner.runMongod({});
- var db1 = newShardConn.getDB( dbname );
- var t = db1.getCollection( coll );
- print(t);
- db1.getCollection( coll ).drop(); //in case collection already existed
- db1.createCollection( coll );
+ // First create fresh collection on a new standalone mongod
+ var newShardConn = MongoRunner.runMongod({});
+ var db1 = newShardConn.getDB(dbname);
+ var t = db1.getCollection(coll);
+ print(t);
+ db1.getCollection(coll).drop(); // in case collection already existed
+ db1.createCollection(coll);
- // Then verify the new collection has userFlags set to 0
- var collstats = db1.getCollection( coll ).stats();
- print( "*************** Fresh Collection Stats ************" );
- printjson( collstats );
- assert.eq( collstats.userFlags , 1 , "fresh collection doesn't have userFlags = 1 ");
+        // Then verify the new collection starts with userFlags set to 1 (usePowerOf2Sizes is on by default)
+ var collstats = db1.getCollection(coll).stats();
+ print("*************** Fresh Collection Stats ************");
+ printjson(collstats);
+ assert.eq(collstats.userFlags, 1, "fresh collection doesn't have userFlags = 1 ");
- // Now we modify the collection with the usePowerOf2Sizes flag
- var res = db1.runCommand( { "collMod" : coll , "usePowerOf2Sizes" : false } );
- assert.eq( res.ok , 1 , "collMod failed" );
+ // Now we modify the collection with the usePowerOf2Sizes flag
+ var res = db1.runCommand({"collMod": coll, "usePowerOf2Sizes": false});
+ assert.eq(res.ok, 1, "collMod failed");
- // and insert some stuff, for the hell of it
- var numdocs = 20;
- for( i=0; i < numdocs; i++){
- assert.writeOK(db1.getCollection( coll ).insert({ _id : i }));
- }
+ // and insert some stuff, for the hell of it
+ var numdocs = 20;
+ for (i = 0; i < numdocs; i++) {
+ assert.writeOK(db1.getCollection(coll).insert({_id: i}));
+ }
- // Next verify that userFlags has changed to 0
- collstats = db1.getCollection( coll ).stats();
- print( "*************** Collection Stats After CollMod ************" );
- printjson( collstats );
- assert.eq( collstats.userFlags , 0 , "modified collection should have userFlags = 0 ");
+ // Next verify that userFlags has changed to 0
+ collstats = db1.getCollection(coll).stats();
+ print("*************** Collection Stats After CollMod ************");
+ printjson(collstats);
+ assert.eq(collstats.userFlags, 0, "modified collection should have userFlags = 0 ");
- // start up a new sharded cluster, and add previous mongod
- var s = new ShardingTest({ name: "user_flags", shards: 1 });
- assert( s.admin.runCommand( { addshard: newShardConn.host , name: "myShard" } ).ok,
- "did not accept new shard" );
+ // start up a new sharded cluster, and add previous mongod
+ var s = new ShardingTest({name: "user_flags", shards: 1});
+ assert(s.admin.runCommand({addshard: newShardConn.host, name: "myShard"}).ok,
+ "did not accept new shard");
- // enable sharding of the collection. Only 1 chunk initially, so move it to
- // other shard to create the collection on that shard
- s.adminCommand( { enablesharding : dbname } );
- s.adminCommand( { shardcollection : ns , key: { _id : 1 } } );
- s.adminCommand({ moveChunk: ns, find: { _id: 1 },
- to: "shard0000", _waitForDelete: true });
+ // enable sharding of the database and collection. Only 1 chunk initially, so move it to the
+ // other shard to create the collection on that shard
+ s.adminCommand({enablesharding: dbname});
+ s.adminCommand({shardcollection: ns, key: {_id: 1}});
+ s.adminCommand({moveChunk: ns, find: {_id: 1}, to: "shard0000", _waitForDelete: true});
- print( "*************** Collection Stats On Other Shard ************" );
- var shard2 = s._connections[0].getDB( dbname );
- shard2stats = shard2.getCollection( coll ).stats();
- printjson( shard2stats );
+ print("*************** Collection Stats On Other Shard ************");
+ var shard2 = s._connections[0].getDB(dbname);
+ shard2stats = shard2.getCollection(coll).stats();
+ printjson(shard2stats);
- assert.eq( shard2stats.count , numdocs , "moveChunk didn't succeed" );
- assert.eq( shard2stats.userFlags , 0 , "new shard should also have userFlags = 0 ");
+ assert.eq(shard2stats.count, numdocs, "moveChunk didn't succeed");
+ assert.eq(shard2stats.userFlags, 0, "new shard should also have userFlags = 0 ");
- MongoRunner.stopMongod(newShardConn);
- s.stop();
-}
+ MongoRunner.stopMongod(newShardConn);
+ s.stop();
+ }
})();
diff --git a/jstests/sharding/version1.js b/jstests/sharding/version1.js
index c79d645b0fc..0e15e6180b1 100644
--- a/jstests/sharding/version1.js
+++ b/jstests/sharding/version1.js
@@ -1,77 +1,93 @@
(function() {
-var s = new ShardingTest({ name: "version1", shards: 1 });
-
-s.adminCommand( { enablesharding : "alleyinsider" } );
-s.adminCommand( { shardcollection : "alleyinsider.foo" , key : { num : 1 } } );
-
-// alleyinsider.foo is supposed to have one chunk, version 1|0, in shard000
-s.printShardingStatus();
-
-a = s._connections[0].getDB( "admin" );
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo", configdb: s._configDB }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: "a" }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- authoritative: true }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0)}),
- "should have failed b/c no auth" );
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true }),
- "should have failed because first setShardVersion needs shard info");
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(2, 0),
- authoritative: true,
- shard: "shard0000",
- shardHost: s.s.host }),
- "should have failed because version is config is 1|0");
-
-var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
-assert.commandWorked( a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: epoch,
- authoritative: true,
- shard: "shard0000",
- shardHost: s.s.host }),
- "should have worked" );
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: "a",
- version: new Timestamp(0, 2),
- versionEpoch: epoch }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 2),
- versionEpoch: epoch }));
-
-assert.commandFailed(a.runCommand({ setShardVersion: "alleyinsider.foo",
- configdb: s._configDB,
- version: new Timestamp(0, 1),
- versionEpoch: epoch }));
-
-// the only way that setSharVersion passes is if the shard agrees with the version
-// the shard takes its version from config directly
-// TODO bump timestamps in config
-// assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB , version : 3 } ).oldVersion.i , 2 , "oldVersion" );
-
-// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get version A" );
-// assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get version B" );
-
-s.stop();
+ var s = new ShardingTest({name: "version1", shards: 1});
+
+ s.adminCommand({enablesharding: "alleyinsider"});
+ s.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}});
+
+ // alleyinsider.foo is supposed to have one chunk, version 1|0, in shard0000
+ s.printShardingStatus();
+
+ a = s._connections[0].getDB("admin");
+
+ assert.commandFailed(
+ a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB}));
+
+ assert.commandFailed(
+ a.runCommand({setShardVersion: "alleyinsider.foo", configdb: s._configDB, version: "a"}));
+
+ assert.commandFailed(a.runCommand(
+ {setShardVersion: "alleyinsider.foo", configdb: s._configDB, authoritative: true}));
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0)
+ }),
+ "should have failed b/c no auth");
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true
+ }),
+ "should have failed because first setShardVersion needs shard info");
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(2, 0),
+ authoritative: true,
+ shard: "shard0000",
+ shardHost: s.s.host
+ }),
+ "should have failed because version in config is 1|0");
+
+ var epoch = s.getDB('config').chunks.findOne().lastmodEpoch;
+ assert.commandWorked(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: epoch,
+ authoritative: true,
+ shard: "shard0000",
+ shardHost: s.s.host
+ }),
+ "should have worked");
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: "a",
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+ }));
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 2),
+ versionEpoch: epoch
+ }));
+
+ assert.commandFailed(a.runCommand({
+ setShardVersion: "alleyinsider.foo",
+ configdb: s._configDB,
+ version: new Timestamp(0, 1),
+ versionEpoch: epoch
+ }));
+
+ // the only way that setShardVersion passes is if the shard agrees with the version
+ // the shard takes its version from config directly
+ // TODO bump timestamps in config
+ // assert.eq( a.runCommand( { "setShardVersion" : "alleyinsider.foo" , configdb : s._configDB ,
+ // version : 3 } ).oldVersion.i , 2 , "oldVersion" );
+
+ // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).mine.i , 3 , "my get
+ // version A" );
+ // assert.eq( a.runCommand( { "getShardVersion" : "alleyinsider.foo" } ).global.i , 3 , "my get
+ // version B" );
+
+ s.stop();
})();
diff --git a/jstests/sharding/version2.js b/jstests/sharding/version2.js
index 0bf8c5892b5..6bdc4601206 100644
--- a/jstests/sharding/version2.js
+++ b/jstests/sharding/version2.js
@@ -1,21 +1,22 @@
(function() {
-'use strict';
+ 'use strict';
-var s = new ShardingTest({ name: "version2", shards: 1 });
+ var s = new ShardingTest({name: "version2", shards: 1});
-assert.commandWorked(s.s0.adminCommand({ enablesharding: "alleyinsider" }));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.foo", key: { num: 1 } }));
-assert.commandWorked(s.s0.adminCommand({ shardcollection: "alleyinsider.bar", key: { num: 1 } }));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "alleyinsider"}));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.foo", key: {num: 1}}));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "alleyinsider.bar", key: {num: 1}}));
-var a = s._connections[0].getDB("admin");
+ var a = s._connections[0].getDB("admin");
-// Setup from one client
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0);
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.i, 0);
+ // Setup from one client
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
+ 0);
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.i,
+ 0);
-var fooEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.foo' }).lastmodEpoch;
-assert.commandWorked(
- a.runCommand({
+ var fooEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.foo'}).lastmodEpoch;
+ assert.commandWorked(a.runCommand({
setShardVersion: "alleyinsider.foo",
configdb: s._configDB,
authoritative: true,
@@ -25,44 +26,54 @@ assert.commandWorked(
shardHost: s.s.host,
}));
-printjson(s.config.chunks.findOne());
+ printjson(s.config.chunks.findOne());
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.t, 1);
-assert.eq(a.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1);
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.t,
+ 1);
+ assert.eq(a.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
+ 1);
-// From a different client
-var a2 = connect(s._connections[0].name + "/admin");
+ // From a different client
+ var a2 = connect(s._connections[0].name + "/admin");
-assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).global.t, 1, "a2 global 1");
-assert.eq(a2.runCommand({ "getShardVersion": "alleyinsider.foo", configdb: s._configDB }).mine.i, 0, "a2 mine 1");
+ assert.eq(
+ a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).global.t,
+ 1,
+ "a2 global 1");
+ assert.eq(a2.runCommand({"getShardVersion": "alleyinsider.foo", configdb: s._configDB}).mine.i,
+ 0,
+ "a2 mine 1");
-function simpleFindOne(){
- return a2.getMongo().getDB("alleyinsider").foo.findOne();
-}
+ function simpleFindOne() {
+ return a2.getMongo().getDB("alleyinsider").foo.findOne();
+ }
-var barEpoch = s.getDB('config').chunks.findOne({ ns: 'alleyinsider.bar' }).lastmodEpoch;
-assert.commandWorked(a2.runCommand({ setShardVersion: "alleyinsider.bar",
- configdb: s._configDB,
- version: new Timestamp(1, 0),
- versionEpoch: barEpoch,
- shard: 'shard0000',
- authoritative: true }),
- "setShardVersion bar temp");
-
-assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
+ var barEpoch = s.getDB('config').chunks.findOne({ns: 'alleyinsider.bar'}).lastmodEpoch;
+ assert.commandWorked(a2.runCommand({
+ setShardVersion: "alleyinsider.bar",
+ configdb: s._configDB,
+ version: new Timestamp(1, 0),
+ versionEpoch: barEpoch,
+ shard: 'shard0000',
+ authoritative: true
+ }),
+ "setShardVersion bar temp");
+ assert.throws(simpleFindOne, [], "should complain about not in sharded mode 1");
-// the only way that setSharVersion passes is if the shard agrees with the version
-// the shard takes its version from config directly
-// TODO bump timestamps in config
-// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 2 }).ok == 1, "setShardVersion a2-1");
+ // the only way that setShardVersion passes is if the shard agrees with the version
+ // the shard takes its version from config directly
+ // TODO bump timestamps in config
+ // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+ // 2 }).ok == 1, "setShardVersion a2-1");
-// simpleFindOne(); // now should run ok
+ // simpleFindOne(); // now should run ok
-// assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version: 3 }).ok == 1, "setShardVersion a2-2");
+ // assert(a2.runCommand({ "setShardVersion": "alleyinsider.foo", configdb: s._configDB, version:
+ // 3 }).ok == 1, "setShardVersion a2-2");
-// simpleFindOne(); // newer version is ok
+ // simpleFindOne(); // newer version is ok
-s.stop();
+ s.stop();
})();
diff --git a/jstests/sharding/write_cmd_auto_split.js b/jstests/sharding/write_cmd_auto_split.js
index dc8abc71597..110fa7ddd9f 100644
--- a/jstests/sharding/write_cmd_auto_split.js
+++ b/jstests/sharding/write_cmd_auto_split.js
@@ -2,154 +2,157 @@
* Tests the auto split will be triggered when using write commands.
*/
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({ shards: 1, other: { chunkSize: 1 }});
+ var st = new ShardingTest({shards: 1, other: {chunkSize: 1}});
-var configDB = st.s.getDB('config');
-assert.commandWorked(configDB.adminCommand({ enableSharding: 'test' }));
-assert.commandWorked(configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }}));
+ var configDB = st.s.getDB('config');
+ assert.commandWorked(configDB.adminCommand({enableSharding: 'test'}));
+ assert.commandWorked(configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}}));
-var doc1k = (new Array(1024)).join('x');
-var testDB = st.s.getDB('test');
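+ // doc1k is a ~1 KB filler string; with 1 MB chunks, a few thousand such docs trigger splits.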
+ var doc1k = (new Array(1024)).join('x');
+ var testDB = st.s.getDB('test');
-jsTest.log('Test single batch insert should auto-split');
+ jsTest.log('Test single batch insert should auto-split');
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
-// we are going to be conservative.
-for (var x = 0; x < 3100; x++) {
- var res = testDB.runCommand({ insert: 'insert',
- documents: [{ x: x, v: doc1k }],
- ordered: false,
- writeConcern: { w: 1 }});
+ // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+ // we are going to be conservative.
+ for (var x = 0; x < 3100; x++) {
+ var res = testDB.runCommand({
+ insert: 'insert',
+ documents: [{x: x, v: doc1k}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- assert(res.ok, 'insert failed: ' + tojson(res));
-}
+ assert(res.ok, 'insert failed: ' + tojson(res));
+ }
-// Inserted batch is a multiple of the chunkSize, expect the chunks to split into
-// more than 2.
-assert.gt(configDB.chunks.find().itcount(), 2);
-testDB.dropDatabase();
+ // Inserted batch is a multiple of the chunkSize, expect the chunks to split into
+ // more than 2.
+ assert.gt(configDB.chunks.find().itcount(), 2);
+ testDB.dropDatabase();
-jsTest.log('Test single batch update should auto-split');
+ jsTest.log('Test single batch update should auto-split');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.update', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}});
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-for (var x = 0; x < 1100; x++) {
- var res = testDB.runCommand({ update: 'update',
- updates: [{ q: { x: x }, u: { x: x, v: doc1k }, upsert: true }],
- ordered: false,
- writeConcern: { w: 1 }});
+ for (var x = 0; x < 1100; x++) {
+ var res = testDB.runCommand({
+ update: 'update',
+ updates: [{q: {x: x}, u: {x: x, v: doc1k}, upsert: true}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- assert(res.ok, 'update failed: ' + tojson(res));
-}
+ assert(res.ok, 'update failed: ' + tojson(res));
+ }
-assert.gt(configDB.chunks.find().itcount(), 1);
-testDB.dropDatabase();
+ assert.gt(configDB.chunks.find().itcount(), 1);
+ testDB.dropDatabase();
-jsTest.log('Test single delete should not auto-split');
+ jsTest.log('Test single delete should not auto-split');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.delete', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}});
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-for (var x = 0; x < 1100; x++) {
- var res = testDB.runCommand({ delete: 'delete',
- deletes: [{ q: { x: x, v: doc1k }, limit : NumberInt(0) }],
- ordered: false,
- writeConcern: { w: 1 }});
+ for (var x = 0; x < 1100; x++) {
+ var res = testDB.runCommand({
+ delete: 'delete',
+ deletes: [{q: {x: x, v: doc1k}, limit: NumberInt(0)}],
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- assert(res.ok, 'delete failed: ' + tojson(res));
-}
+ assert(res.ok, 'delete failed: ' + tojson(res));
+ }
-assert.eq(1, configDB.chunks.find().itcount());
-testDB.dropDatabase();
+ assert.eq(1, configDB.chunks.find().itcount());
+ testDB.dropDatabase();
-jsTest.log('Test batched insert should auto-split');
+ jsTest.log('Test batched insert should auto-split');
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.insert', key: { x: 1 }});
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.insert', key: {x: 1}});
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-// Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
-// we are going to be conservative.
-for (var x = 0; x < 1100; x += 400) {
- var docs = [];
+ // Note: Estimated 'chunk size' tracked by mongos is initialized with a random value so
+ // we are going to be conservative.
+ for (var x = 0; x < 1100; x += 400) {
+ var docs = [];
- for (var y = 0; y < 400; y++) {
- docs.push({ x: (x + y), v: doc1k });
- }
+ for (var y = 0; y < 400; y++) {
+ docs.push({x: (x + y), v: doc1k});
+ }
- var res = testDB.runCommand({ insert: 'insert',
- documents: docs,
- ordered: false,
- writeConcern: { w: 1 }});
+ var res = testDB.runCommand(
+ {insert: 'insert', documents: docs, ordered: false, writeConcern: {w: 1}});
+ assert(res.ok, 'insert failed: ' + tojson(res));
+ }
+
+ assert.gt(configDB.chunks.find().itcount(), 1);
+ testDB.dropDatabase();
- assert(res.ok, 'insert failed: ' + tojson(res));
-}
+ jsTest.log('Test batched update should auto-split');
-assert.gt(configDB.chunks.find().itcount(), 1);
-testDB.dropDatabase();
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.update', key: {x: 1}});
-jsTest.log('Test batched update should auto-split');
+ assert.eq(1, configDB.chunks.find().itcount());
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.update', key: { x: 1 }});
+ for (var x = 0; x < 1100; x += 400) {
+ var docs = [];
-assert.eq(1, configDB.chunks.find().itcount());
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id}, u: {x: id, v: doc1k}, upsert: true});
+ }
-for (var x = 0; x < 1100; x += 400) {
- var docs = [];
+ var res = testDB.runCommand(
+ {update: 'update', updates: docs, ordered: false, writeConcern: {w: 1}});
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({ q: { x: id }, u: { x: id, v: doc1k }, upsert: true });
+ assert(res.ok, 'update failed: ' + tojson(res));
}
- var res = testDB.runCommand({ update: 'update',
- updates: docs,
- ordered: false,
- writeConcern: { w: 1 }});
+ assert.gt(configDB.chunks.find().itcount(), 1);
+ testDB.dropDatabase();
- assert(res.ok, 'update failed: ' + tojson(res));
-}
+ jsTest.log('Test batched delete should not auto-split');
-assert.gt(configDB.chunks.find().itcount(), 1);
-testDB.dropDatabase();
+ configDB.adminCommand({enableSharding: 'test'});
+ configDB.adminCommand({shardCollection: 'test.delete', key: {x: 1}});
-jsTest.log('Test batched delete should not auto-split');
+ assert.eq(1, configDB.chunks.find().itcount());
-configDB.adminCommand({ enableSharding: 'test' });
-configDB.adminCommand({ shardCollection: 'test.delete', key: { x: 1 }});
+ for (var x = 0; x < 1100; x += 400) {
+ var docs = [];
-assert.eq(1, configDB.chunks.find().itcount());
+ for (var y = 0; y < 400; y++) {
+ var id = x + y;
+ docs.push({q: {x: id, v: doc1k}, limit: NumberInt(0)});
+ }
-for (var x = 0; x < 1100; x += 400) {
- var docs = [];
+ var res = testDB.runCommand({
+ delete: 'delete',
+ deletes: docs,
+ ordered: false,
+ writeConcern: {w: 1}
+ });
- for (var y = 0; y < 400; y++) {
- var id = x + y;
- docs.push({ q: { x: id, v: doc1k }, top: 0 });
+ assert(res.ok, 'delete failed: ' + tojson(res));
}
- var res = testDB.runCommand({ delete: 'delete',
- deletes: [{ q: { x: x, v: doc1k }, limit : NumberInt(0) }],
- ordered: false,
- writeConcern: { w: 1 }});
-
- assert(res.ok, 'delete failed: ' + tojson(res));
-}
-
-assert.eq(1, configDB.chunks.find().itcount());
+ assert.eq(1, configDB.chunks.find().itcount());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/write_commands_sharding_state.js b/jstests/sharding/write_commands_sharding_state.js
index 7d0991870eb..ee4bf78958e 100644
--- a/jstests/sharding/write_commands_sharding_state.js
+++ b/jstests/sharding/write_commands_sharding_state.js
@@ -3,79 +3,80 @@
// @tags: [requires_persistence]
(function() {
-'use strict';
+ 'use strict';
-var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2 });
+ var st = new ShardingTest({name: "write_commands", mongos: 2, shards: 2});
-var dbTestName = 'WriteCommandsTestDB';
+ var dbTestName = 'WriteCommandsTestDB';
-assert.commandWorked(st.s0.adminCommand({ enablesharding: dbTestName }));
-st.ensurePrimaryShard(dbTestName, 'shard0000');
+ assert.commandWorked(st.s0.adminCommand({enablesharding: dbTestName}));
+ st.ensurePrimaryShard(dbTestName, 'shard0000');
-assert.commandWorked(st.s0.adminCommand({ shardCollection: dbTestName + '.TestColl',
- key: { Key: 1 },
- unique: true }));
+ assert.commandWorked(st.s0.adminCommand(
+ {shardCollection: dbTestName + '.TestColl', key: {Key: 1}, unique: true}));
-// Split at keys 10 and 20
-assert.commandWorked(st.s0.adminCommand({ split: dbTestName + '.TestColl', middle: { Key: 10 } }));
-assert.commandWorked(st.s0.adminCommand({ split: dbTestName + '.TestColl', middle: { Key: 20 } }));
+ // Split at keys 10 and 20
+ assert.commandWorked(st.s0.adminCommand({split: dbTestName + '.TestColl', middle: {Key: 10}}));
+ assert.commandWorked(st.s0.adminCommand({split: dbTestName + '.TestColl', middle: {Key: 20}}));
-printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+ printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-// Move < 10 to shard0000, 10 and 20 to shard00001
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 0 }, to: 'shard0000' });
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 19 }, to: 'shard0001' });
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 21 }, to: 'shard0001' });
+ // Move < 10 to shard0000, 10 and 20 to shard0001
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 0}, to: 'shard0000'});
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 19}, to: 'shard0001'});
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 21}, to: 'shard0001'});
-printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+ printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-// Insert one document in each chunk, which we will use to change
-assert(st.s1.getDB(dbTestName).TestColl.insert({ Key: 1 }));
-assert(st.s1.getDB(dbTestName).TestColl.insert({ Key: 11 }));
-assert(st.s1.getDB(dbTestName).TestColl.insert({ Key: 21 }));
+ // Insert one document in each chunk; these will be used by the update below
+ assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 1}));
+ assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 11}));
+ assert(st.s1.getDB(dbTestName).TestColl.insert({Key: 21}));
-// Make sure the documents are correctly placed
-printjson(st.d0.getDB(dbTestName).TestColl.find().toArray());
-printjson(st.d1.getDB(dbTestName).TestColl.find().toArray());
+ // Make sure the documents are correctly placed
+ printjson(st.d0.getDB(dbTestName).TestColl.find().toArray());
+ printjson(st.d1.getDB(dbTestName).TestColl.find().toArray());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.count());
-assert.eq(2, st.d1.getDB(dbTestName).TestColl.count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.count());
+ assert.eq(2, st.d1.getDB(dbTestName).TestColl.count());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 1 }).count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 11 }).count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 21 }).count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 11}).count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-// Move chunk [0, 19] to shard0000 and make sure the documents are correctly placed
-st.s0.adminCommand({ moveChunk: dbTestName + '.TestColl', find: { Key: 19 }, to: 'shard0000' });
+ // Move chunk [0, 19] to shard0000 and make sure the documents are correctly placed
+ st.s0.adminCommand({moveChunk: dbTestName + '.TestColl', find: {Key: 19}, to: 'shard0000'});
-printjson(st.config.getSiblingDB('config').chunks.find().toArray());
-printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
-printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.config.getSiblingDB('config').chunks.find().toArray());
+ printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
-// Now restart all mongod instances, so they don't know yet that they are sharded
-st.restartMongod(0);
-st.restartMongod(1);
+ // Now restart all mongod instances, so they don't know yet that they are sharded
+ st.restartMongod(0);
+ st.restartMongod(1);
-// Now that both mongod shards are restarted, they don't know yet that they are part of a sharded
-// cluster until they get a setShardVerion command. Mongos instance s1 has stale metadata and
-// doesn't know that chunk with key 19 has moved to shard0000 so it will send it to shard0001 at
-// first.
-//
-// Shard0001 would only send back a stale config exception if it receives a setShardVersion
-// command. The bug that this test validates is that setShardVersion is indeed being sent (for more
-// information, see SERVER-19395).
-st.s1.getDB(dbTestName).TestColl.update({ Key: 11 }, { $inc: { Counter: 1 } }, { upsert: true });
+ // Now that both mongod shards are restarted, they don't know yet that they are part of a
+ // sharded cluster until they get a setShardVersion command. Mongos instance s1 has stale
+ // metadata and doesn't know that the chunk with key 19 has moved to shard0000, so it will
+ // send it to shard0001 at first.
+ //
+ // Shard0001 would only send back a stale config exception if it receives a setShardVersion
+ // command. The bug that this test validates is that setShardVersion is indeed being sent
+ // (for more information, see SERVER-19395).
+ st.s1.getDB(dbTestName).TestColl.update({Key: 11}, {$inc: {Counter: 1}}, {upsert: true});
-printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
-printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.d0.getDB(dbTestName).TestColl.find({}).toArray());
+ printjson(st.d1.getDB(dbTestName).TestColl.find({}).toArray());
-assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());
+ assert.eq(2, st.d0.getDB(dbTestName).TestColl.count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.count());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 1 }).count());
-assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({ Key: 11 }).count());
-assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({ Key: 21 }).count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 1}).count());
+ assert.eq(1, st.d0.getDB(dbTestName).TestColl.find({Key: 11}).count());
+ assert.eq(1, st.d1.getDB(dbTestName).TestColl.find({Key: 21}).count());
-st.stop();
+ st.stop();
})();
diff --git a/jstests/sharding/zbigMapReduce.js b/jstests/sharding/zbigMapReduce.js
index d9b771b3d51..fda81e12df8 100644
--- a/jstests/sharding/zbigMapReduce.js
+++ b/jstests/sharding/zbigMapReduce.js
@@ -1,21 +1,25 @@
// This test is skipped on 32-bit platforms
function setupTest() {
- var s = new ShardingTest({ shards: 2,
- mongos: 1,
- other: { rs: true,
- numReplicas: 2,
- chunkSize: 1,
- rsOptions: { oplogSize: 50 },
- enableBalancer: 1 } });
+ var s = new ShardingTest({
+ shards: 2,
+ mongos: 1,
+ other: {
+ rs: true,
+ numReplicas: 2,
+ chunkSize: 1,
+ rsOptions: {oplogSize: 50},
+ enableBalancer: 1
+ }
+ });
// Reduce chunk size to split
var config = s.getDB("config");
config.settings.save({_id: "chunksize", value: 1});
- assert.commandWorked(s.s0.adminCommand({ enablesharding: "test" }));
+ assert.commandWorked(s.s0.adminCommand({enablesharding: "test"}));
s.ensurePrimaryShard('test', 'test-rs0');
- assert.commandWorked(s.s0.adminCommand({ shardcollection: "test.foo", key: { "_id": 1 } }));
+ assert.commandWorked(s.s0.adminCommand({shardcollection: "test.foo", key: {"_id": 1}}));
return s;
}
@@ -29,17 +33,18 @@ function runTest(s) {
if (db.serverBuildInfo().bits == 32) {
// Make data ~0.5MB for 32 bit builds
- for (var i = 0; i < 512; i++) str += "a";
- }
- else {
+ for (var i = 0; i < 512; i++)
+ str += "a";
+ } else {
// Make data ~4MB
- for (var i = 0; i < 4*1024; i++) str += "a";
+ for (var i = 0; i < 4 * 1024; i++)
+ str += "a";
}
var bulk = db.foo.initializeUnorderedBulkOp();
- for (j=0; j<100; j++) {
- for (i=0; i<512; i++) {
- bulk.insert({ i: idInc++, val: valInc++, y:str });
+ for (j = 0; j < 100; j++) {
+ for (i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
}
assert.writeOK(bulk.execute());
@@ -56,11 +61,12 @@ function runTest(s) {
print("Shard 1: " + s.shard1.getCollection(db.foo + "").find().itcount());
for (var i = 0; i < 51200; i++) {
- if(!db.foo.findOne({ i: i }, { i: 1 })) {
+ if (!db.foo.findOne({i: i}, {i: 1})) {
print("Could not find: " + i);
}
- if(i % 100 == 0) print("Checked " + i);
+ if (i % 100 == 0)
+ print("Checked " + i);
}
print("PROBABLY WILL ASSERT NOW");
@@ -79,15 +85,19 @@ function runTest(s) {
s.printChunks();
s.printChangeLog();
- function map() { emit('count', 1); }
- function reduce(key, values) { return Array.sum(values); }
+ function map() {
+ emit('count', 1);
+ }
+ function reduce(key, values) {
+ return Array.sum(values);
+ }
jsTest.log("Test basic mapreduce...");
// Test basic mapReduce
for (var iter = 0; iter < 5; iter++) {
print("Test #" + iter);
- out = db.foo.mapReduce(map, reduce,"big_out");
+ out = db.foo.mapReduce(map, reduce, "big_out");
}
print("Testing output to different db...");
@@ -104,7 +114,7 @@ function runTest(s) {
print("Testing mr replace into DB " + iter);
- res = db.foo.mapReduce(map , reduce , { out: { replace: outCollStr, db: outDbStr } });
+ res = db.foo.mapReduce(map, reduce, {out: {replace: outCollStr, db: outDbStr}});
printjson(res);
outDb = s.getDB(outDbStr);
@@ -112,7 +122,7 @@ function runTest(s) {
obj = outColl.convertToSingleObject("value");
- assert.eq(51200 , obj.count , "Received wrong result " + obj.count);
+ assert.eq(51200, obj.count, "Received wrong result " + obj.count);
print("checking result field");
assert.eq(res.result.collection, outCollStr, "Wrong collection " + res.result.collection);
@@ -123,81 +133,85 @@ function runTest(s) {
// check nonAtomic output
assert.throws(function() {
- db.foo.mapReduce(map, reduce, { out: {replace: "big_out", nonAtomic: true } });
+ db.foo.mapReduce(map, reduce, {out: {replace: "big_out", nonAtomic: true}});
});
jsTest.log();
// Add docs with dup "i"
valInc = 0;
- for (j=0; j<100; j++) {
+ for (j = 0; j < 100; j++) {
print("Inserted document: " + (j * 100));
bulk = db.foo.initializeUnorderedBulkOp();
- for (i=0; i<512; i++) {
- bulk.insert({ i: idInc++, val: valInc++, y: str });
+ for (i = 0; i < 512; i++) {
+ bulk.insert({i: idInc++, val: valInc++, y: str});
}
// wait for replication to catch up
- assert.writeOK(bulk.execute({ w: 2 }));
+ assert.writeOK(bulk.execute({w: 2}));
}
jsTest.log("No errors...");
- map2 = function() { emit(this.val, 1); };
- reduce2 = function(key, values) { return Array.sum(values); };
+ map2 = function() {
+ emit(this.val, 1);
+ };
+ reduce2 = function(key, values) {
+ return Array.sum(values);
+ };
// Test merge
outcol = "big_out_merge";
// M/R quarter of the docs
jsTestLog("Test A");
- out = db.foo.mapReduce(map2, reduce2, { query: {i: {$lt: 25600} }, out: { merge: outcol } });
+ out = db.foo.mapReduce(map2, reduce2, {query: {i: {$lt: 25600}}, out: {merge: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(25600 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
// M/R further docs
jsTestLog("Test B");
out = db.foo.mapReduce(
- map2, reduce2, { query: {i: {$gte: 25600, $lt: 51200} }, out: { merge: outcol } });
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {merge: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
// M/R do 2nd half of docs
jsTestLog("Test C");
out = db.foo.mapReduce(
- map2, reduce2, { query: {i: {$gte: 51200} }, out: { merge: outcol, nonAtomic: true } });
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {merge: outcol, nonAtomic: true}});
printjson(out);
- assert.eq(51200 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
- assert.eq(1 , db[outcol].findOne().value , "Received wrong result");
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(1, db[outcol].findOne().value, "Received wrong result");
// Test reduce
jsTestLog("Test D");
outcol = "big_out_reduce";
// M/R quarter of the docs
- out = db.foo.mapReduce(map2, reduce2,{ query: { i: { $lt: 25600 } }, out: { reduce: outcol } });
+ out = db.foo.mapReduce(map2, reduce2, {query: {i: {$lt: 25600}}, out: {reduce: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(25600 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(25600, out.counts.output, "Received wrong result");
// M/R further docs
jsTestLog("Test E");
out = db.foo.mapReduce(
- map2, reduce2, { query: { i: { $gte: 25600, $lt: 51200 } }, out: { reduce: outcol } });
+ map2, reduce2, {query: {i: {$gte: 25600, $lt: 51200}}, out: {reduce: outcol}});
printjson(out);
- assert.eq(25600 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
+ assert.eq(25600, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
// M/R do 2nd half of docs
jsTestLog("Test F");
out = db.foo.mapReduce(
- map2, reduce2, { query: { i: {$gte: 51200} }, out: { reduce: outcol, nonAtomic: true } });
+ map2, reduce2, {query: {i: {$gte: 51200}}, out: {reduce: outcol, nonAtomic: true}});
printjson(out);
- assert.eq(51200 , out.counts.emit , "Received wrong result");
- assert.eq(51200 , out.counts.output , "Received wrong result");
- assert.eq(2 , db[outcol].findOne().value , "Received wrong result");
+ assert.eq(51200, out.counts.emit, "Received wrong result");
+ assert.eq(51200, out.counts.output, "Received wrong result");
+ assert.eq(2, db[outcol].findOne().value, "Received wrong result");
// Verify that data is also on secondary
jsTestLog("Test G");
@@ -208,9 +222,9 @@ function runTest(s) {
// that replication can keep up even on slow machines.
s.stopBalancer();
s._rs[0].test.awaitReplication(300 * 1000);
- assert.eq(51200 , primary.getDB("test")[outcol].count() , "Wrong count");
+ assert.eq(51200, primary.getDB("test")[outcol].count(), "Wrong count");
for (var i = 0; i < secondaries.length; ++i) {
- assert.eq(51200 , secondaries[i].getDB("test")[outcol].count() , "Wrong count");
+ assert.eq(51200, secondaries[i].getDB("test")[outcol].count(), "Wrong count");
}
}
@@ -218,8 +232,7 @@ var s = setupTest();
if (s.getDB("admin").runCommand("buildInfo").bits < 64) {
print("Skipping test on 32-bit platforms");
-}
-else {
+} else {
runTest(s);
}
diff --git a/jstests/sharding/zero_shard_version.js b/jstests/sharding/zero_shard_version.js
index 20fae7ac522..9f15e247e83 100644
--- a/jstests/sharding/zero_shard_version.js
+++ b/jstests/sharding/zero_shard_version.js
@@ -3,178 +3,175 @@
* against a major version of zero or incompatible epochs.
*/
(function() {
-'use strict';
-
-var st = new ShardingTest({ shards: 2, mongos: 4 });
-
-var testDB_s0 = st.s.getDB('test');
-assert.commandWorked(testDB_s0.adminCommand({ enableSharding: 'test' }));
-st.ensurePrimaryShard('test', 'shard0001');
-assert.commandWorked(testDB_s0.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
-
-var checkShardMajorVersion = function(conn, expectedVersion) {
- var shardVersionInfo = conn.adminCommand({ getShardVersion: 'test.user' });
- assert.eq(expectedVersion, shardVersionInfo.global.getTime());
-};
+ 'use strict';
-///////////////////////////////////////////////////////
-// Test shard with empty chunk
+ var st = new ShardingTest({shards: 2, mongos: 4});
+
+ var testDB_s0 = st.s.getDB('test');
+ assert.commandWorked(testDB_s0.adminCommand({enableSharding: 'test'}));
+ st.ensurePrimaryShard('test', 'shard0001');
+ assert.commandWorked(testDB_s0.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
-// shard0: 0|0|a
-// shard1: 1|0|a, [-inf, inf)
-// mongos0: 1|0|a
-
-var testDB_s1 = st.s1.getDB('test');
-assert.writeOK(testDB_s1.user.insert({ x: 1 }));
-assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000' }));
-
-// Official config:
-// shard0: 2|0|a, [-inf, inf)
-// shard1: 0|0|a
-//
-// Shard metadata:
-// shard0: 0|0|a
-// shard1: 0|0|a
-// mongos0: 1|0|a
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-// mongos0 still thinks that { x: 1 } belong to shard0001, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s0.user.findOne({ x: 1 }));
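+ // Asserts that the shard's view of the 'test.user' major version matches expectedVersion.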
+ var checkShardMajorVersion = function(conn, expectedVersion) {
+ var shardVersionInfo = conn.adminCommand({getShardVersion: 'test.user'});
+ assert.eq(expectedVersion, shardVersionInfo.global.getTime());
+ };
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 0);
-
-// Set mongos2 & mongos3 to version 2|0|a
-var testDB_s2 = st.s2.getDB('test');
-assert.neq(null, testDB_s2.user.findOne({ x: 1 }));
-
-var testDB_s3 = st.s3.getDB('test');
-assert.neq(null, testDB_s3.user.findOne({ x: 1 }));
-
-///////////////////////////////////////////////////////
-// Test unsharded collection
-// mongos versions: s0, s2, s3: 2|0|a
-
-testDB_s1.user.drop();
-assert.writeOK(testDB_s1.user.insert({ x: 10 }));
-
-// shard0: 0|0|0
-// shard1: 0|0|0
-// mongos0: 2|0|a
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-// mongos0 still thinks { x: 10 } belong to shard0000, but since coll is dropped,
-// query should be routed to primary shard.
-assert.neq(null, testDB_s0.user.findOne({ x: 10 }));
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-///////////////////////////////////////////////////////
-// Test 2 shards with 1 chunk
-// mongos versions: s0: 0|0|0, s2, s3: 2|0|a
-
-testDB_s1.user.drop();
-testDB_s1.adminCommand({ shardCollection: 'test.user', key: { x: 1 }});
-testDB_s1.adminCommand({ split: 'test.user', middle: { x: 0 }});
-
-// shard0: 0|0|b,
-// shard1: 1|1|b, [-inf, 0), [0, inf)
-
-testDB_s1.user.insert({ x: 1 });
-testDB_s1.user.insert({ x: -11 });
-assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
- find: { x: -1 },
- to: 'shard0000' }));
-
-// Official config:
-// shard0: 2|0|b, [-inf, 0)
-// shard1: 2|1|b, [0, inf)
-//
-// Shard metadata:
-// shard0: 0|0|b
-// shard1: 2|1|b
-//
-// mongos2: 2|0|a
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 2);
-
-// mongos2 still thinks that { x: 1 } belong to shard0000, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s2.user.findOne({ x: 1 }));
-
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 2);
-
-// Set shard metadata to 2|0|b
-assert.neq(null, testDB_s2.user.findOne({ x: -11 }));
-
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 2);
-
-// Official config:
-// shard0: 2|0|b, [-inf, 0)
-// shard1: 2|1|b, [0, inf)
-//
-// Shard metadata:
-// shard0: 2|0|b
-// shard1: 2|1|b
-//
-// mongos3: 2|0|a
-
-// 4th mongos still thinks that { x: 1 } belong to shard0000, but should be able to
-// refresh it's metadata correctly.
-assert.neq(null, testDB_s3.user.findOne({ x: 1 }));
-
-///////////////////////////////////////////////////////
-// Test mongos thinks unsharded when it's actually sharded
-// mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+ ///////////////////////////////////////////////////////
+ // Test shard with empty chunk
-// Set mongos0 to version 0|0|0
-testDB_s0.user.drop();
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-assert.eq(null, testDB_s0.user.findOne({ x: 1 }));
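+ // Chunk version notation below: major|minor|epoch (e.g. 2|0|a).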
+ // shard0: 0|0|a
+ // shard1: 1|0|a, [-inf, inf)
+ // mongos0: 1|0|a
+
+ var testDB_s1 = st.s1.getDB('test');
+ assert.writeOK(testDB_s1.user.insert({x: 1}));
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+ // Official config:
+ // shard0: 2|0|a, [-inf, inf)
+ // shard1: 0|0|a
+ //
+ // Shard metadata:
+ // shard0: 0|0|a
+ // shard1: 0|0|a
+ // mongos0: 1|0|a
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ // mongos0 still thinks that { x: 1 } belongs to shard0001, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
-// Needs to also set mongos1 to version 0|0|0, otherwise it'll complain that collection is
-// already sharded.
-assert.eq(null, testDB_s1.user.findOne({ x: 1 }));
-assert.commandWorked(testDB_s1.adminCommand({ shardCollection: 'test.user', key: { x: 1 }}));
-testDB_s1.user.insert({ x: 1 });
-
-assert.commandWorked(testDB_s1.adminCommand({ moveChunk: 'test.user',
- find: { x: 0 },
- to: 'shard0000' }));
-
-// Official config:
-// shard0: 2|0|c, [-inf, inf)
-// shard1: 0|0|c
-//
-// Shard metadata:
-// shard0: 0|0|c
-// shard1: 0|0|c
-//
-// mongos0: 0|0|0
-
-checkShardMajorVersion(st.d0, 0);
-checkShardMajorVersion(st.d1, 0);
-
-// 1st mongos thinks that collection is unshareded and will attempt to query primary shard.
-assert.neq(null, testDB_s0.user.findOne({ x: 1 }));
-
-checkShardMajorVersion(st.d0, 2);
-checkShardMajorVersion(st.d1, 0);
-
-st.stop();
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 0);
+
+ // Set mongos2 & mongos3 to version 2|0|a
+ var testDB_s2 = st.s2.getDB('test');
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ var testDB_s3 = st.s3.getDB('test');
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test unsharded collection
+ // mongos versions: s0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ assert.writeOK(testDB_s1.user.insert({x: 10}));
+
+ // shard0: 0|0|0
+ // shard1: 0|0|0
+ // mongos0: 2|0|a
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ // mongos0 still thinks { x: 10 } belongs to shard0000, but since the collection is dropped,
+ // the query should be routed to the primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 10}));
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ ///////////////////////////////////////////////////////
+ // Test 2 shards with 1 chunk
+ // mongos versions: s0: 0|0|0, s2, s3: 2|0|a
+
+ testDB_s1.user.drop();
+ testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}});
+ testDB_s1.adminCommand({split: 'test.user', middle: {x: 0}});
+
+ // shard0: 0|0|b,
+ // shard1: 1|1|b, [-inf, 0), [0, inf)
+
+ testDB_s1.user.insert({x: 1});
+ testDB_s1.user.insert({x: -11});
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: -1}, to: 'shard0000'}));
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 0|0|b
+ // shard1: 2|1|b
+ //
+ // mongos2: 2|0|a
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 2);
+
+ // mongos2 still thinks that { x: 1 } belongs to shard0000, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s2.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 2);
+
+ // Set shard metadata to 2|0|b
+ assert.neq(null, testDB_s2.user.findOne({x: -11}));
+
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 2);
+
+ // Official config:
+ // shard0: 2|0|b, [-inf, 0)
+ // shard1: 2|1|b, [0, inf)
+ //
+ // Shard metadata:
+ // shard0: 2|0|b
+ // shard1: 2|1|b
+ //
+ // mongos3: 2|0|a
+
+ // The 4th mongos still thinks that { x: 1 } belongs to shard0000, but should be able to
+ // refresh its metadata correctly.
+ assert.neq(null, testDB_s3.user.findOne({x: 1}));
+
+ ///////////////////////////////////////////////////////
+ // Test mongos thinks unsharded when it's actually sharded
+ // mongos current versions: s0: 0|0|0, s2, s3: 2|0|b
+
+ // Set mongos0 to version 0|0|0
+ testDB_s0.user.drop();
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ assert.eq(null, testDB_s0.user.findOne({x: 1}));
+
+ // We also need to set mongos1 to version 0|0|0, otherwise it'll complain that the collection
+ // is already sharded.
+ assert.eq(null, testDB_s1.user.findOne({x: 1}));
+ assert.commandWorked(testDB_s1.adminCommand({shardCollection: 'test.user', key: {x: 1}}));
+ testDB_s1.user.insert({x: 1});
+
+ assert.commandWorked(
+ testDB_s1.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: 'shard0000'}));
+
+ // Official config:
+ // shard0: 2|0|c, [-inf, inf)
+ // shard1: 0|0|c
+ //
+ // Shard metadata:
+ // shard0: 0|0|c
+ // shard1: 0|0|c
+ //
+ // mongos0: 0|0|0
+
+ checkShardMajorVersion(st.d0, 0);
+ checkShardMajorVersion(st.d1, 0);
+
+ // The 1st mongos thinks the collection is unsharded and will attempt to query the primary shard.
+ assert.neq(null, testDB_s0.user.findOne({x: 1}));
+
+ checkShardMajorVersion(st.d0, 2);
+ checkShardMajorVersion(st.d1, 0);
+
+ st.stop();
})();